#define ARRAY_OBJ_INDEX(ptr,array,elem_size) (((char*)(ptr) - ((char*)(array) + G_STRUCT_OFFSET (MonoArray, vector))) / (elem_size))
gboolean
-sgen_client_cardtable_scan_object (GCObject *obj, mword block_obj_size, guint8 *cards, gboolean mod_union, ScanCopyContext ctx)
+sgen_client_cardtable_scan_object (GCObject *obj, mword block_obj_size, guint8 *cards, ScanCopyContext ctx)
{
MonoVTable *vt = SGEN_LOAD_VTABLE (obj);
MonoClass *klass = vt->klass;
for (; elem < card_end; elem += elem_size)
scan_vtype_func (obj, elem, desc, ctx.queue BINARY_PROTOCOL_ARG (elem_size));
} else {
- CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
+ ScanPtrFieldFunc scan_ptr_field_func = ctx.ops->scan_ptr_field;
HEAVY_STAT (++los_array_cards);
- for (; elem < card_end; elem += SIZEOF_VOID_P) {
- GCObject *new_;
- gpointer old = *(gpointer*)elem;
- if ((mod_union && old) || G_UNLIKELY (sgen_ptr_in_nursery (old))) {
- HEAVY_STAT (++los_array_remsets);
- copy_func ((GCObject**)elem, ctx.queue);
- new_ = *(GCObject **)elem;
- if (G_UNLIKELY (sgen_ptr_in_nursery (new_)))
- sgen_add_to_global_remset (elem, new_);
- }
- }
+ for (; elem < card_end; elem += SIZEOF_VOID_P)
+ scan_ptr_field_func (obj, (GCObject**)elem, ctx.queue);
}
binary_protocol_card_scan (first_elem, elem - first_elem);
sgen-los.c \
sgen-major-copy-object.h \
sgen-marksweep-drain-gray-stack.h \
- sgen-marksweep-scan-object-concurrent.h \
sgen-marksweep.c \
sgen-memory-governor.c \
sgen-memory-governor.h \
sgen-pinning.h \
sgen-pointer-queue.c \
sgen-pointer-queue.h \
+ sgen-array-list.h \
+ sgen-array-list.c \
sgen-protocol-def.h \
sgen-protocol.c \
sgen-protocol.h \
--- /dev/null
+/*
+ * sgen-array-list.c: A pointer array list that doesn't require reallocs
+ *
+ * Copyright (C) 2016 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifdef HAVE_SGEN_GC
+
+#include <string.h>
+
+#include "mono/sgen/sgen-gc.h"
+#include "mono/sgen/sgen-array-list.h"
+
+/*
+ * Adds the next bucket to the array, lock-free. 'old_capacity' is the
+ * capacity the caller observed; if another thread already grew the array
+ * past it, this is a no-op. The new bucket is zero-allocated and published
+ * with a CAS; if a racing grower wins the CAS, our bucket is released again.
+ */
+static void
+sgen_array_list_grow (SgenArrayList *array, guint32 old_capacity)
+{
+ const guint32 new_bucket = sgen_array_list_index_bucket (old_capacity);
+ const guint32 growth = sgen_array_list_bucket_size (new_bucket);
+ const guint32 new_capacity = old_capacity + growth;
+ const guint32 new_bucket_size = sizeof (**array->entries) * growth;
+ gpointer *entries;
+ if (array->capacity >= new_capacity)
+ return;
+ /* mem_type selects sgen's internal allocator; -1 means plain malloc. */
+ if (array->mem_type != -1)
+ entries = (gpointer*) sgen_alloc_internal_dynamic (new_bucket_size, array->mem_type, TRUE);
+ else
+ entries = (gpointer*) g_malloc0 (new_bucket_size);
+ if (array->bucket_alloc_callback)
+ array->bucket_alloc_callback (entries, new_bucket_size, TRUE);
+ /*
+ * The zeroing of the newly allocated bucket must be complete before storing
+ * the new bucket pointer.
+ */
+ mono_memory_write_barrier ();
+ if (InterlockedCompareExchangePointer ((volatile gpointer *)&array->entries [new_bucket], entries, NULL) == NULL) {
+ /*
+ * It must not be the case that we succeeded in setting the bucket
+ * pointer, while someone else succeeded in changing the capacity.
+ */
+ if (InterlockedCompareExchange ((volatile gint32 *)&array->capacity, new_capacity, old_capacity) != old_capacity)
+ g_assert_not_reached ();
+ array->slot_hint = old_capacity;
+ return;
+ }
+ /* Someone beat us to the allocation. */
+ if (array->bucket_alloc_callback)
+ array->bucket_alloc_callback (entries, new_bucket_size, FALSE);
+ if (array->mem_type != -1)
+ sgen_free_internal_dynamic (entries, new_bucket_size, array->mem_type);
+ else
+ g_free (entries);
+}
+
+/*
+ * Returns the index of an unset slot below 'capacity', or (guint32)-1 if
+ * none is found. Without an is_slot_set_func individual slots cannot be
+ * probed, so allocation is append-only via next_slot. With one, the search
+ * starts at slot_hint and wraps around to the beginning of the array.
+ */
+static guint32
+sgen_array_list_find_unset (SgenArrayList *array, guint32 capacity)
+{
+ if (!array->is_slot_set_func) {
+ guint32 next_slot = array->next_slot;
+ /* We can't lookup empty slots, use next_slot */
+ if (next_slot < capacity)
+ return next_slot;
+ } else {
+ guint32 slot_hint = array->slot_hint;
+ guint32 index;
+ volatile gpointer *slot;
+
+ SGEN_ARRAY_LIST_FOREACH_SLOT_RANGE(array, slot_hint, capacity, slot, index) {
+ if (!array->is_slot_set_func (slot))
+ return index;
+ } SGEN_ARRAY_LIST_END_FOREACH_SLOT_RANGE;
+
+ SGEN_ARRAY_LIST_FOREACH_SLOT_RANGE (array, 0, slot_hint, slot, index) {
+ if (!array->is_slot_set_func (slot))
+ return index;
+ } SGEN_ARRAY_LIST_END_FOREACH_SLOT_RANGE;
+ }
+
+ /* -1 converts to the maximal guint32; callers compare against it. */
+ return -1;
+}
+
+/*
+ * Grows next_slot so it covers 'new_index', making the slot visible to
+ * iteration. Never shrinks next_slot.
+ */
+static void
+sgen_array_list_update_next_slot (SgenArrayList *array, guint32 new_index)
+{
+ if (!array->set_slot_func) {
+ /*
+ * If we don't have a custom setter it means we don't have thread
+ * safety requirements.
+ */
+ if (new_index >= array->next_slot)
+ array->next_slot = new_index + 1;
+ } else {
+ guint32 old_next_slot;
+ /* Thread safe update */
+ do {
+ old_next_slot = array->next_slot;
+ if (new_index < old_next_slot)
+ break;
+ } while (InterlockedCompareExchange ((volatile gint32 *)&array->next_slot, new_index + 1, old_next_slot) != old_next_slot);
+ }
+}
+
+/*
+ * Claims a free slot and stores 'ptr' in it ('data' is passed through to
+ * the custom setter, if any). Returns the slot index. When
+ * 'increase_size_before_set' is TRUE, next_slot is published before the
+ * slot contents are written; otherwise the contents are written first.
+ * Retries on contention (setter failure or a full array).
+ */
+guint32
+sgen_array_list_add (SgenArrayList *array, gpointer ptr, int data, gboolean increase_size_before_set)
+{
+ guint32 index, capacity;
+ volatile gpointer *slot;
+
+ if (!array->capacity)
+ sgen_array_list_grow (array, 0);
+retry:
+ capacity = array->capacity;
+ index = sgen_array_list_find_unset (array, capacity);
+ /* (guint32)-1 from find_unset; the comparison relies on the usual unsigned conversion. */
+ if (index == -1) {
+ sgen_array_list_grow (array, capacity);
+ goto retry;
+ }
+ array->slot_hint = index;
+
+ if (increase_size_before_set) {
+ sgen_array_list_update_next_slot (array, index);
+ mono_memory_write_barrier ();
+ }
+
+ slot = sgen_array_list_get_slot (array, index);
+ if (array->set_slot_func) {
+ /* The setter may lose a race for this slot; look for another one. */
+ if (!array->set_slot_func (slot, ptr, data))
+ goto retry;
+ } else {
+ *slot = ptr;
+ }
+
+ if (!increase_size_before_set) {
+ mono_memory_write_barrier ();
+ sgen_array_list_update_next_slot (array, index);
+ }
+
+ return index;
+}
+
+/*
+ * Removes all NULL pointers from the array, compacting the remaining
+ * entries to the front in their original order. Not thread safe.
+ */
+void
+sgen_array_list_remove_nulls (SgenArrayList *array)
+{
+ guint32 start = 0;
+ volatile gpointer *slot;
+
+ SGEN_ARRAY_LIST_FOREACH_SLOT (array, slot) {
+ if (*slot)
+ *sgen_array_list_get_slot (array, start++) = *slot;
+ } SGEN_ARRAY_LIST_END_FOREACH_SLOT;
+
+ /* Publish the compacted contents before shrinking next_slot. */
+ mono_memory_write_barrier ();
+ array->next_slot = start;
+}
+
+/*
+ * Does a linear search through the pointer array to find `ptr`. Returns the index if
+ * found, otherwise (guint32)-1.
+ */
+guint32
+sgen_array_list_find (SgenArrayList *array, gpointer ptr)
+{
+ volatile gpointer *slot;
+
+ SGEN_ARRAY_LIST_FOREACH_SLOT (array, slot) {
+ if (*slot == ptr)
+ /* __index is the flat slot index, maintained by the FOREACH macro. */
+ return __index;
+ } SGEN_ARRAY_LIST_END_FOREACH_SLOT;
+ return (guint32)-1;
+}
+
+#endif
--- /dev/null
+/*
+ * sgen-array-list.h: A pointer array that doesn't use reallocs.
+ *
+ * Copyright (C) 2016 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MONO_SGEN_ARRAY_LIST_H__
+#define __MONO_SGEN_ARRAY_LIST_H__
+
+#include <glib.h>
+
+#define SGEN_ARRAY_LIST_BUCKETS (32)
+#define SGEN_ARRAY_LIST_MIN_BUCKET_BITS (5)
+#define SGEN_ARRAY_LIST_MIN_BUCKET_SIZE (1 << SGEN_ARRAY_LIST_MIN_BUCKET_BITS)
+
+typedef void (*SgenArrayListBucketAllocCallback) (gpointer *bucket, guint32 new_bucket_size, gboolean alloc);
+typedef gboolean (*SgenArrayListIsSlotSetFunc) (volatile gpointer *slot);
+typedef gboolean (*SgenArrayListSetSlotFunc) (volatile gpointer *slot, gpointer ptr, int data);
+
+/*
+ * 'entries' is an array of pointers to buckets of increasing size. The first
+ * bucket has size 'MIN_BUCKET_SIZE', and each bucket is twice the size of the
+ * previous, i.e.:
+ *
+ * |-------|-- MIN_BUCKET_SIZE
+ * [0] -> xxxxxxxx
+ * [1] -> xxxxxxxxxxxxxxxx
+ * [2] -> xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ * ...
+ *
+ * 'slot_hint' denotes the position of the last allocation, so that the
+ * whole array needn't be searched on every allocation.
+ *
+ * The size of the spine, 'SGEN_ARRAY_LIST_BUCKETS', is chosen so
+ * that the maximum number of entries is no less than G_MAXUINT32.
+ */
+
+typedef struct {
+ volatile gpointer *volatile entries [SGEN_ARRAY_LIST_BUCKETS];
+ volatile guint32 capacity; /* total slots allocated across all buckets */
+ volatile guint32 slot_hint; /* starting point for free-slot searches */
+ volatile guint32 next_slot; /* one past the highest used slot; iteration bound */
+ SgenArrayListBucketAllocCallback bucket_alloc_callback;
+ SgenArrayListIsSlotSetFunc is_slot_set_func;
+ SgenArrayListSetSlotFunc set_slot_func;
+ int mem_type; /* sgen internal mem type or -1 for malloc allocation */
+} SgenArrayList;
+
+/*
+ * Computes floor(log2(index + MIN_BUCKET_SIZE)) - 1, giving the index
+ * of the bucket containing a slot.
+ */
+static inline guint32
+sgen_array_list_index_bucket (guint32 index)
+{
+#ifdef __GNUC__
+ return CHAR_BIT * sizeof (index) - __builtin_clz (index + SGEN_ARRAY_LIST_MIN_BUCKET_SIZE) - 1 - SGEN_ARRAY_LIST_MIN_BUCKET_BITS;
+#else
+ /* Portable fallback: derive the position of the highest set bit by shifting. */
+ guint count = 0;
+ index += SGEN_ARRAY_LIST_MIN_BUCKET_SIZE;
+ while (index) {
+ ++count;
+ index >>= 1;
+ }
+ return count - 1 - SGEN_ARRAY_LIST_MIN_BUCKET_BITS;
+#endif
+}
+
+/* Number of slots in the bucket at spine position 'index' (doubles per bucket). */
+static inline guint32
+sgen_array_list_bucket_size (guint32 index)
+{
+ return 1 << (index + SGEN_ARRAY_LIST_MIN_BUCKET_BITS);
+}
+
+/* Splits a flat slot index into its (bucket, offset-within-bucket) pair. */
+static inline void
+sgen_array_list_bucketize (guint32 index, guint32 *bucket, guint32 *offset)
+{
+ *bucket = sgen_array_list_index_bucket (index);
+ *offset = index - sgen_array_list_bucket_size (*bucket) + SGEN_ARRAY_LIST_MIN_BUCKET_SIZE;
+}
+
+/* Returns the address of slot 'index'; asserts the slot is within capacity. */
+static inline volatile gpointer *
+sgen_array_list_get_slot (SgenArrayList *array, guint32 index)
+{
+ guint32 bucket, offset;
+
+ SGEN_ASSERT (0, index < array->capacity, "Why are we accessing an entry that is not allocated");
+
+ sgen_array_list_bucketize (index, &bucket, &offset);
+ return &(array->entries [bucket] [offset]);
+}
+
+/* Static initializer; argument order matches the struct's field order. */
+#define SGEN_ARRAY_LIST_INIT(bucket_alloc_callback, is_slot_set_func, set_slot_func, mem_type) { { NULL }, 0, 0, 0, (bucket_alloc_callback), (is_slot_set_func), (set_slot_func), (mem_type) }
+
+/*
+ * Iterates 'slot' over every slot below next_slot. Also maintains __index,
+ * the flat index of the current slot, which the loop body may read.
+ */
+#define SGEN_ARRAY_LIST_FOREACH_SLOT(array, slot) { \
+ guint32 __bucket, __offset; \
+ const guint32 __max_bucket = sgen_array_list_index_bucket ((array)->capacity); \
+ guint32 __index = 0; \
+ const guint32 __next_slot = (array)->next_slot; \
+ for (__bucket = 0; __bucket < __max_bucket; ++__bucket) { \
+ volatile gpointer *__entries = (array)->entries [__bucket]; \
+ for (__offset = 0; __offset < sgen_array_list_bucket_size (__bucket); ++__offset, ++__index) { \
+ if (__index >= __next_slot) \
+ break; \
+ slot = &__entries [__offset];
+
+#define SGEN_ARRAY_LIST_END_FOREACH_SLOT } } }
+
+/* Iterates 'slot' and 'index' over the half-open slot range [begin, end). */
+#define SGEN_ARRAY_LIST_FOREACH_SLOT_RANGE(array, begin, end, slot, index) { \
+ for (index = (begin); index < (end); index++) { \
+ guint32 __bucket, __offset; \
+ volatile gpointer *__entries; \
+ sgen_array_list_bucketize (index, &__bucket, &__offset); \
+ __entries = (array)->entries [__bucket]; \
+ slot = &__entries [__offset];
+
+#define SGEN_ARRAY_LIST_END_FOREACH_SLOT_RANGE } }
+
+guint32 sgen_array_list_add (SgenArrayList *array, gpointer ptr, int data, gboolean increase_size_before_set);
+guint32 sgen_array_list_find (SgenArrayList *array, gpointer ptr);
+void sgen_array_list_remove_nulls (SgenArrayList *array);
+
+#endif
*out_num_cards = num_cards;
}
+/*
+ * Precleans mod union cards: snapshots the current card state into
+ * 'cards_preclean' (the set that still needs to be scanned afterwards)
+ * and clears the marked cards in 'cards'. 'num_cards' is the number of
+ * card bytes in both buffers.
+ */
+void
+sgen_card_table_preclean_mod_union (guint8 *cards, guint8 *cards_preclean, size_t num_cards)
+{
+ size_t i;
+
+ memcpy (cards_preclean, cards, num_cards);
+ for (i = 0; i < num_cards; i++) {
+ if (cards_preclean [i]) {
+ cards [i] = 0;
+ }
+ }
+ /*
+ * When precleaning we need to make sure the card cleaning
+ * takes place before the object is scanned. If we don't
+ * do this we could finish scanning the object and, before
+ * the cleaning of the card takes place, another thread
+ * could dirty the object, mark the mod_union card only for
+ * us to clean it back, without scanning the object again.
+ */
+ mono_memory_barrier ();
+}
+
#ifdef SGEN_HAVE_OVERLAPPING_CARDS
static void
sgen_card_table_clear_cards ();
#endif
SGEN_TV_GETTIME (atv);
- sgen_get_major_collector ()->scan_card_table (FALSE, ctx);
+ sgen_get_major_collector ()->scan_card_table (CARDTABLE_SCAN_GLOBAL, ctx);
SGEN_TV_GETTIME (btv);
last_major_scan_time = SGEN_TV_ELAPSED (atv, btv);
major_card_scan_time += last_major_scan_time;
- sgen_los_scan_card_table (FALSE, ctx);
+ sgen_los_scan_card_table (CARDTABLE_SCAN_GLOBAL, ctx);
SGEN_TV_GETTIME (atv);
last_los_scan_time = SGEN_TV_ELAPSED (btv, atv);
los_card_scan_time += last_los_scan_time;
#endif
void
-sgen_cardtable_scan_object (GCObject *obj, mword block_obj_size, guint8 *cards, gboolean mod_union, ScanCopyContext ctx)
+sgen_cardtable_scan_object (GCObject *obj, mword block_obj_size, guint8 *cards, ScanCopyContext ctx)
{
HEAVY_STAT (++large_objects);
- if (sgen_client_cardtable_scan_object (obj, block_obj_size, cards, mod_union, ctx))
+ if (sgen_client_cardtable_scan_object (obj, block_obj_size, cards, ctx))
return;
HEAVY_STAT (++bloby_objects);
void* sgen_card_table_align_pointer (void *ptr);
void sgen_card_table_mark_range (mword address, mword size);
void sgen_cardtable_scan_object (GCObject *obj, mword obj_size, guint8 *cards,
- gboolean mod_union, ScanCopyContext ctx);
+ ScanCopyContext ctx);
gboolean sgen_card_table_get_card_data (guint8 *dest, mword address, mword cards);
void sgen_card_table_update_mod_union_from_cards (guint8 *dest, guint8 *start_card, size_t num_cards);
void sgen_card_table_update_mod_union (guint8 *dest, char *obj, mword obj_size, size_t *out_num_cards);
+void sgen_card_table_preclean_mod_union (guint8 *cards, guint8 *cards_preclean, size_t num_cards);
guint8* sgen_get_card_table_configuration (int *shift_bits, gpointer *mask);
* parts of the object based on which cards are marked, do so and return TRUE. Otherwise,
* return FALSE.
*/
-gboolean sgen_client_cardtable_scan_object (GCObject *obj, mword block_obj_size, guint8 *cards, gboolean mod_union, ScanCopyContext ctx);
+gboolean sgen_client_cardtable_scan_object (GCObject *obj, mword block_obj_size, guint8 *cards, ScanCopyContext ctx);
/*
* Called after nursery objects have been pinned. No action is necessary.
static gboolean do_dump_nursery_content = FALSE;
static gboolean enable_nursery_canaries = FALSE;
+static gboolean precleaning_enabled = TRUE;
+
#ifdef HEAVY_STATISTICS
guint64 stat_objects_alloced_degraded = 0;
guint64 stat_bytes_alloced_degraded = 0;
ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
g_assert (concurrent_collection_in_progress);
- major_collector.scan_card_table (TRUE, ctx);
+ major_collector.scan_card_table (CARDTABLE_SCAN_MOD_UNION, ctx);
}
static void
ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
g_assert (concurrent_collection_in_progress);
- sgen_los_scan_card_table (TRUE, ctx);
+ sgen_los_scan_card_table (CARDTABLE_SCAN_MOD_UNION, ctx);
+}
+
+/*
+ * Worker job: precleans the mod union card tables of both the major heap
+ * and the large object space. Only valid during a concurrent collection.
+ */
+static void
+job_mod_union_preclean (void *worker_data_untyped, SgenThreadPoolJob *job)
+{
+ WorkerData *worker_data = (WorkerData *)worker_data_untyped;
+ ScanJob *job_data = (ScanJob*)job;
+ ScanCopyContext ctx = CONTEXT_FROM_OBJECT_OPERATIONS (job_data->ops, sgen_workers_get_job_gray_queue (worker_data));
+
+ g_assert (concurrent_collection_in_progress);
+
+ major_collector.scan_card_table (CARDTABLE_SCAN_MOD_UNION_PRECLEAN, ctx);
+ sgen_los_scan_card_table (CARDTABLE_SCAN_MOD_UNION_PRECLEAN, ctx);
+}
static void
major_collector.init_to_space ();
+ SGEN_ASSERT (0, sgen_workers_all_done (), "Why are the workers not done when we start or finish a major collection?");
/*
* The concurrent collector doesn't move objects, neither on
* the major heap nor in the nursery, so we can mark even
* collector we start the workers after pinning.
*/
if (mode == COPY_OR_MARK_FROM_ROOTS_START_CONCURRENT) {
- SGEN_ASSERT (0, sgen_workers_all_done (), "Why are the workers not done when we start or finish a major collection?");
- sgen_workers_start_all_workers (object_ops);
+ if (precleaning_enabled) {
+ ScanJob *sj;
+ /* Mod union preclean job */
+ sj = (ScanJob*)sgen_thread_pool_job_alloc ("preclean mod union cardtable", job_mod_union_preclean, sizeof (ScanJob));
+ sj->ops = object_ops;
+ sgen_workers_start_all_workers (object_ops, &sj->job);
+ } else {
+ sgen_workers_start_all_workers (object_ops, NULL);
+ }
gray_queue_enable_redirect (WORKERS_DISTRIBUTE_GRAY_QUEUE);
} else if (mode == COPY_OR_MARK_FROM_ROOTS_FINISH_CONCURRENT) {
if (sgen_workers_have_idle_work ()) {
- sgen_workers_start_all_workers (object_ops);
+ sgen_workers_start_all_workers (object_ops, NULL);
sgen_workers_join ();
}
}
continue;
}
+ if (!strcmp (opt, "precleaning")) {
+ precleaning_enabled = TRUE;
+ continue;
+ }
+ if (!strcmp (opt, "no-precleaning")) {
+ precleaning_enabled = FALSE;
+ continue;
+ }
+
if (major_collector.handle_gc_param && major_collector.handle_gc_param (opt))
continue;
typedef void (*CopyOrMarkObjectFunc) (GCObject**, SgenGrayQueue*);
typedef void (*ScanObjectFunc) (GCObject *obj, SgenDescriptor desc, SgenGrayQueue*);
typedef void (*ScanVTypeFunc) (GCObject *full_object, char *start, SgenDescriptor desc, SgenGrayQueue* BINARY_PROTOCOL_ARG (size_t size));
+typedef void (*ScanPtrFieldFunc) (GCObject *obj, GCObject **ptr, SgenGrayQueue* queue);
typedef gboolean (*DrainGrayStackFunc) (SgenGrayQueue *queue);
typedef struct {
CopyOrMarkObjectFunc copy_or_mark_object;
ScanObjectFunc scan_object;
ScanVTypeFunc scan_vtype;
+ ScanPtrFieldFunc scan_ptr_field;
/* Drain stack optimized for the above functions */
DrainGrayStackFunc drain_gray_stack;
/*FIXME add allocation function? */
size_t num_unique_scanned_objects;
} ScannedObjectCounts;
+/*
+ * What kind of card table scan to perform. PRECLEAN includes the
+ * MOD_UNION bit, so testing (scan_type & CARDTABLE_SCAN_MOD_UNION)
+ * matches both mod-union variants.
+ */
+typedef enum {
+ CARDTABLE_SCAN_GLOBAL = 0,
+ CARDTABLE_SCAN_MOD_UNION = 1,
+ CARDTABLE_SCAN_MOD_UNION_PRECLEAN = CARDTABLE_SCAN_MOD_UNION | 2,
+} CardTableScanType;
+
typedef struct _SgenMajorCollector SgenMajorCollector;
struct _SgenMajorCollector {
size_t section_size;
void (*free_non_pinned_object) (GCObject *obj, size_t size);
void (*pin_objects) (SgenGrayQueue *queue);
void (*pin_major_object) (GCObject *obj, SgenGrayQueue *queue);
- void (*scan_card_table) (gboolean mod_union, ScanCopyContext ctx);
+ void (*scan_card_table) (CardTableScanType scan_type, ScanCopyContext ctx);
void (*iterate_live_block_ranges) (sgen_cardtable_block_callback callback);
void (*update_cardtable_mod_union) (void);
void (*init_to_space) (void);
gboolean sgen_ptr_is_in_los (char *ptr, char **start);
void sgen_los_iterate_objects (IterateObjectCallbackFunc cb, void *user_data);
void sgen_los_iterate_live_block_ranges (sgen_cardtable_block_callback callback);
-void sgen_los_scan_card_table (gboolean mod_union, ScanCopyContext ctx);
+void sgen_los_scan_card_table (CardTableScanType scan_type, ScanCopyContext ctx);
void sgen_los_update_cardtable_mod_union (void);
void sgen_los_count_cards (long long *num_total_cards, long long *num_marked_cards);
gboolean sgen_los_is_valid_object (char *object);
#include "mono/sgen/sgen-gc.h"
#include "mono/sgen/sgen-client.h"
+#include "mono/sgen/sgen-array-list.h"
#include "mono/utils/mono-membar.h"
#ifdef HEAVY_STATISTICS
static volatile guint32 stat_gc_handles_max_allocated = 0;
#endif
-#define BUCKETS (32 - MONO_GC_HANDLE_TYPE_SHIFT)
-#define MIN_BUCKET_BITS (5)
-#define MIN_BUCKET_SIZE (1 << MIN_BUCKET_BITS)
-
/*
* A table of GC handle data, implementing a simple lock-free bitmap allocator.
*
- * 'entries' is an array of pointers to buckets of increasing size. The first
- * bucket has size 'MIN_BUCKET_SIZE', and each bucket is twice the size of the
- * previous, i.e.:
- *
- * |-------|-- MIN_BUCKET_SIZE
- * [0] -> xxxxxxxx
- * [1] -> xxxxxxxxxxxxxxxx
- * [2] -> xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- * ...
- *
- * The size of the spine, 'BUCKETS', is chosen so that the maximum number of
- * entries is no less than the maximum index value of a GC handle.
- *
* Each entry in a bucket is a pointer with two tag bits: if
* 'GC_HANDLE_OCCUPIED' returns true for a slot, then the slot is occupied; if
* so, then 'GC_HANDLE_VALID' gives whether the entry refers to a valid (1) or
* object pointer. If the reference is NULL, and 'GC_HANDLE_TYPE_IS_WEAK' is
* true for 'type', then the pointer is a metadata pointer--this allows us to
* retrieve the domain ID of an expired weak reference in Mono.
- *
- * Finally, 'slot_hint' denotes the position of the last allocation, so that the
- * whole array needn't be searched on every allocation.
*/
typedef struct {
- volatile gpointer *volatile entries [BUCKETS];
- volatile guint32 capacity;
- volatile guint32 slot_hint;
- volatile guint32 max_index;
+ SgenArrayList entries_array;
guint8 type;
} HandleData;
-static inline guint
-bucket_size (guint index)
-{
- return 1 << (index + MIN_BUCKET_BITS);
-}
-
-/* Computes floor(log2(index + MIN_BUCKET_SIZE)) - 1, giving the index
- * of the bucket containing a slot.
- */
-static inline guint
-index_bucket (guint index)
-{
-#ifdef __GNUC__
- return CHAR_BIT * sizeof (index) - __builtin_clz (index + MIN_BUCKET_SIZE) - 1 - MIN_BUCKET_BITS;
-#else
- guint count = 0;
- index += MIN_BUCKET_SIZE;
- while (index) {
- ++count;
- index >>= 1;
- }
- return count - 1 - MIN_BUCKET_BITS;
-#endif
-}
-
-static inline void
-bucketize (guint index, guint *bucket, guint *offset)
-{
- *bucket = index_bucket (index);
- *offset = index - bucket_size (*bucket) + MIN_BUCKET_SIZE;
-}
-
static void
protocol_gchandle_update (int handle_type, gpointer link, gpointer old_value, gpointer new_value)
{
return NULL;
}
+/*
+ * SgenArrayListIsSlotSetFunc for the handle tables: a slot counts as set
+ * when its occupied tag bit is on.
+ */
+static inline gboolean
+is_slot_set (volatile gpointer *slot)
+{
+ gpointer entry = *slot;
+ if (MONO_GC_HANDLE_OCCUPIED (entry))
+ return TRUE;
+ return FALSE;
+}
+
/* Try to claim a slot by setting its occupied bit. */
static inline gboolean
-try_occupy_slot (HandleData *handles, guint bucket, guint offset, GCObject *obj, gboolean track)
+try_occupy_slot (volatile gpointer *slot, gpointer obj, int data)
{
- volatile gpointer *link_addr = &(handles->entries [bucket] [offset]);
- if (MONO_GC_HANDLE_OCCUPIED (*link_addr))
+ if (is_slot_set (slot))
return FALSE;
- return try_set_slot (link_addr, obj, NULL, (GCHandleType)handles->type) != NULL;
+ return try_set_slot (slot, (GCObject *)obj, NULL, (GCHandleType)data) != NULL;
+}
+
+/*
+ * SgenArrayListBucketAllocCallback for the pinned handle table: registers
+ * each newly allocated bucket as a pinned GC root, and deregisters it when
+ * the bucket is released.
+ */
+static void
+bucket_alloc_callback (gpointer *bucket, guint32 new_bucket_size, gboolean alloc)
+{
+ if (alloc)
+ sgen_register_root ((char *)bucket, new_bucket_size, SGEN_DESCRIPTOR_NULL, ROOT_TYPE_PINNED, MONO_ROOT_SOURCE_GC_HANDLE, "pinned gc handles");
+ else
+ sgen_deregister_root ((char *)bucket);
+}
static HandleData gc_handles [] = {
- { { NULL }, 0, 0, 0, (HANDLE_WEAK) },
- { { NULL }, 0, 0, 0, (HANDLE_WEAK_TRACK) },
- { { NULL }, 0, 0, 0, (HANDLE_NORMAL) },
- { { NULL }, 0, 0, 0, (HANDLE_PINNED) }
+ { SGEN_ARRAY_LIST_INIT (NULL, is_slot_set, try_occupy_slot, -1), (HANDLE_WEAK) },
+ { SGEN_ARRAY_LIST_INIT (NULL, is_slot_set, try_occupy_slot, -1), (HANDLE_WEAK_TRACK) },
+ { SGEN_ARRAY_LIST_INIT (NULL, is_slot_set, try_occupy_slot, -1), (HANDLE_NORMAL) },
+ { SGEN_ARRAY_LIST_INIT (bucket_alloc_callback, is_slot_set, try_occupy_slot, -1), (HANDLE_PINNED) }
};
static HandleData *
sgen_mark_normal_gc_handles (void *addr, SgenUserMarkFunc mark_func, void *gc_data)
{
HandleData *handles = gc_handles_for_type (HANDLE_NORMAL);
- size_t bucket, offset;
- const guint max_bucket = index_bucket (handles->capacity);
- guint32 index = 0;
- const guint32 max_index = handles->max_index;
- for (bucket = 0; bucket < max_bucket; ++bucket) {
- volatile gpointer *entries = handles->entries [bucket];
- for (offset = 0; offset < bucket_size (bucket); ++offset, ++index) {
- volatile gpointer *entry;
- gpointer hidden, revealed;
- /* No need to iterate beyond the largest index ever allocated. */
- if (index > max_index)
- return;
- entry = &entries [offset];
- hidden = *entry;
- revealed = MONO_GC_REVEAL_POINTER (hidden, FALSE);
- if (!MONO_GC_HANDLE_IS_OBJECT_POINTER (hidden))
- continue;
- mark_func ((MonoObject **)&revealed, gc_data);
- g_assert (revealed);
- *entry = MONO_GC_HANDLE_OBJECT_POINTER (revealed, FALSE);
- }
- }
+ SgenArrayList *array = &handles->entries_array;
+ volatile gpointer *slot;
+ gpointer hidden, revealed;
+
+ SGEN_ARRAY_LIST_FOREACH_SLOT (array, slot) {
+ hidden = *slot;
+ revealed = MONO_GC_REVEAL_POINTER (hidden, FALSE);
+ if (!MONO_GC_HANDLE_IS_OBJECT_POINTER (hidden))
+ continue;
+ mark_func ((MonoObject **)&revealed, gc_data);
+ g_assert (revealed);
+ *slot = MONO_GC_HANDLE_OBJECT_POINTER (revealed, FALSE);
+ } SGEN_ARRAY_LIST_END_FOREACH_SLOT;
}
-static guint
-handle_data_find_unset (HandleData *handles, guint32 begin, guint32 end)
-{
- guint index;
- gint delta = begin < end ? +1 : -1;
- for (index = begin; index < end; index += delta) {
- guint bucket, offset;
- volatile gpointer *entries;
- bucketize (index, &bucket, &offset);
- entries = handles->entries [bucket];
- g_assert (entries);
- if (!MONO_GC_HANDLE_OCCUPIED (entries [offset]))
- return index;
- }
- return -1;
-}
-
-/* Adds a bucket if necessary and possible. */
-static void
-handle_data_grow (HandleData *handles, guint32 old_capacity)
-{
- const guint new_bucket = index_bucket (old_capacity);
- const guint32 growth = bucket_size (new_bucket);
- const guint32 new_capacity = old_capacity + growth;
- gpointer *entries;
- const size_t new_bucket_size = sizeof (**handles->entries) * growth;
- if (handles->capacity >= new_capacity)
- return;
- entries = (gpointer *)g_malloc0 (new_bucket_size);
- if (handles->type == HANDLE_PINNED)
- sgen_register_root ((char *)entries, new_bucket_size, SGEN_DESCRIPTOR_NULL, ROOT_TYPE_PINNED, MONO_ROOT_SOURCE_GC_HANDLE, "pinned gc handles");
- /* The zeroing of the newly allocated bucket must be complete before storing
- * the new bucket pointer.
- */
- mono_memory_write_barrier ();
- if (InterlockedCompareExchangePointer ((volatile gpointer *)&handles->entries [new_bucket], entries, NULL) == NULL) {
- /* It must not be the case that we succeeded in setting the bucket
- * pointer, while someone else succeeded in changing the capacity.
- */
- if (InterlockedCompareExchange ((volatile gint32 *)&handles->capacity, new_capacity, old_capacity) != old_capacity)
- g_assert_not_reached ();
- handles->slot_hint = old_capacity;
- return;
- }
- /* Someone beat us to the allocation. */
- if (handles->type == HANDLE_PINNED)
- sgen_deregister_root ((char *)entries);
- g_free (entries);
-}
static guint32
alloc_handle (HandleData *handles, GCObject *obj, gboolean track)
{
- guint index;
- guint32 res;
- guint bucket, offset;
- guint32 capacity;
- guint32 slot_hint;
- guint32 max_index;
- if (!handles->capacity)
- handle_data_grow (handles, 0);
-retry:
- capacity = handles->capacity;
- slot_hint = handles->slot_hint;
- index = handle_data_find_unset (handles, slot_hint, capacity);
- if (index == -1)
- index = handle_data_find_unset (handles, 0, slot_hint);
- if (index == -1) {
- handle_data_grow (handles, capacity);
- goto retry;
- }
- handles->slot_hint = index;
+ guint32 res, index;
+ SgenArrayList *array = &handles->entries_array;
/*
* If a GC happens shortly after a new bucket is allocated, the entire
* we track the maximum index seen so far, so that we can skip the empty
* slots.
*
- * Note that we update `max_index` before we even try occupying the
+ * Note that we update `next_slot` before we even try occupying the
* slot. If we did it the other way around and a GC happened in
* between, the GC wouldn't know that the slot was occupied. This is
* not a huge deal since `obj` is on the stack and thus pinned anyway,
* but hopefully some day it won't be anymore.
*/
- do {
- max_index = handles->max_index;
- if (index <= max_index)
- break;
- } while (InterlockedCompareExchange ((volatile gint32 *)&handles->max_index, index, max_index) != max_index);
-
- bucketize (index, &bucket, &offset);
- if (!try_occupy_slot (handles, bucket, offset, obj, track))
- goto retry;
+ index = sgen_array_list_add (array, obj, handles->type, TRUE);
#ifdef HEAVY_STATISTICS
InterlockedIncrement ((volatile gint32 *)&stat_gc_handles_allocated);
if (stat_gc_handles_allocated > stat_gc_handles_max_allocated)
sgen_gchandle_iterate (GCHandleType handle_type, int max_generation, SgenGCHandleIterateCallback callback, gpointer user)
{
HandleData *handle_data = gc_handles_for_type (handle_type);
- size_t bucket, offset;
- guint max_bucket = index_bucket (handle_data->capacity);
- guint32 index = 0;
- guint32 max_index = handle_data->max_index;
+ SgenArrayList *array = &handle_data->entries_array;
+ gpointer hidden, result, occupied;
+ volatile gpointer *slot;
+
/* If a new bucket has been allocated, but the capacity has not yet been
* increased, nothing can yet have been allocated in the bucket because the
* world is stopped, so we shouldn't miss any handles during iteration.
*/
- for (bucket = 0; bucket < max_bucket; ++bucket) {
- volatile gpointer *entries = handle_data->entries [bucket];
- for (offset = 0; offset < bucket_size (bucket); ++offset, ++index) {
- gpointer hidden;
- gpointer result;
- /* Table must contain no garbage pointers. */
- gboolean occupied;
- /* No need to iterate beyond the largest index ever allocated. */
- if (index > max_index)
- return;
- hidden = entries [offset];
- occupied = MONO_GC_HANDLE_OCCUPIED (hidden);
- g_assert (hidden ? occupied : !occupied);
- if (!occupied)
- continue;
- result = callback (hidden, handle_type, max_generation, user);
- if (result)
- SGEN_ASSERT (0, MONO_GC_HANDLE_OCCUPIED (result), "Why did the callback return an unoccupied entry?");
- else
- HEAVY_STAT (InterlockedDecrement ((volatile gint32 *)&stat_gc_handles_allocated));
- protocol_gchandle_update (handle_type, (gpointer)&entries [offset], hidden, result);
- entries [offset] = result;
- }
- }
+ SGEN_ARRAY_LIST_FOREACH_SLOT (array, slot) {
+ hidden = *slot;
+ occupied = (gpointer) MONO_GC_HANDLE_OCCUPIED (hidden);
+ g_assert (hidden ? !!occupied : !occupied);
+ if (!occupied)
+ continue;
+ result = callback (hidden, handle_type, max_generation, user);
+ if (result)
+ SGEN_ASSERT (0, MONO_GC_HANDLE_OCCUPIED (result), "Why did the callback return an unoccupied entry?");
+ else
+ HEAVY_STAT (InterlockedDecrement ((volatile gint32 *)&stat_gc_handles_allocated));
+ protocol_gchandle_update (handle_type, (gpointer)slot, hidden, result);
+ *slot = result;
+ } SGEN_ARRAY_LIST_END_FOREACH_SLOT;
}
/**
/* Invalid handles are possible; accessing one should produce NULL. (#34276) */
if (!handles)
return NULL;
- guint bucket, offset;
- g_assert (index < handles->capacity);
- bucketize (index, &bucket, &offset);
- return link_get (&handles->entries [bucket] [offset], MONO_GC_HANDLE_TYPE_IS_WEAK (type));
+ return link_get (sgen_array_list_get_slot (&handles->entries_array, index), MONO_GC_HANDLE_TYPE_IS_WEAK (type));
}
void
sgen_gchandle_set_target (guint32 gchandle, GCObject *obj)
{
- guint index = MONO_GC_HANDLE_SLOT (gchandle);
+ guint32 index = MONO_GC_HANDLE_SLOT (gchandle);
GCHandleType type = MONO_GC_HANDLE_TYPE (gchandle);
HandleData *handles = gc_handles_for_type (type);
+ volatile gpointer *slot;
+ gpointer entry;
+
if (!handles)
return;
- guint bucket, offset;
- gpointer slot;
- g_assert (index < handles->capacity);
- bucketize (index, &bucket, &offset);
+ slot = sgen_array_list_get_slot (&handles->entries_array, index);
do {
- slot = handles->entries [bucket] [offset];
- SGEN_ASSERT (0, MONO_GC_HANDLE_OCCUPIED (slot), "Why are we setting the target on an unoccupied slot?");
- } while (!try_set_slot (&handles->entries [bucket] [offset], obj, slot, (GCHandleType)handles->type));
+ entry = *slot;
+ SGEN_ASSERT (0, MONO_GC_HANDLE_OCCUPIED (entry), "Why are we setting the target on an unoccupied slot?");
+ } while (!try_set_slot (slot, obj, entry, (GCHandleType)handles->type));
}
static gpointer
-mono_gchandle_slot_metadata (volatile gpointer *slot_addr, gboolean is_weak)
+mono_gchandle_slot_metadata (volatile gpointer *slot, gboolean is_weak)
{
- gpointer slot;
+ gpointer entry;
gpointer metadata;
retry:
- slot = *slot_addr;
- if (!MONO_GC_HANDLE_OCCUPIED (slot))
+ entry = *slot;
+ if (!MONO_GC_HANDLE_OCCUPIED (entry))
return NULL;
- if (MONO_GC_HANDLE_IS_OBJECT_POINTER (slot)) {
- GCObject *obj = (GCObject *)MONO_GC_REVEAL_POINTER (slot, is_weak);
+ if (MONO_GC_HANDLE_IS_OBJECT_POINTER (entry)) {
+ GCObject *obj = (GCObject *)MONO_GC_REVEAL_POINTER (entry, is_weak);
/* See note [dummy use]. */
sgen_dummy_use (obj);
/*
* at this point and recompute it later, in which case we would still use
* it.
*/
- if (*slot_addr != slot)
+ if (*slot != entry)
goto retry;
return sgen_client_metadata_for_object (obj);
}
- metadata = MONO_GC_REVEAL_POINTER (slot, is_weak);
+ metadata = MONO_GC_REVEAL_POINTER (entry, is_weak);
/* See note [dummy use]. */
sgen_dummy_use (metadata);
- if (*slot_addr != slot)
+ if (*slot != entry)
goto retry;
return metadata;
}
gpointer
sgen_gchandle_get_metadata (guint32 gchandle)
{
- guint index = MONO_GC_HANDLE_SLOT (gchandle);
+ guint32 index = MONO_GC_HANDLE_SLOT (gchandle);
GCHandleType type = MONO_GC_HANDLE_TYPE (gchandle);
HandleData *handles = gc_handles_for_type (type);
+ volatile gpointer *slot;
+
if (!handles)
return NULL;
- guint bucket, offset;
- if (index >= handles->capacity)
+ if (index >= handles->entries_array.capacity)
return NULL;
- bucketize (index, &bucket, &offset);
- return mono_gchandle_slot_metadata (&handles->entries [bucket] [offset], MONO_GC_HANDLE_TYPE_IS_WEAK (type));
+
+ slot = sgen_array_list_get_slot (&handles->entries_array, index);
+
+ return mono_gchandle_slot_metadata (slot, MONO_GC_HANDLE_TYPE_IS_WEAK (type));
}
/**
void
mono_gchandle_free (guint32 gchandle)
{
- guint index = MONO_GC_HANDLE_SLOT (gchandle);
+ guint32 index = MONO_GC_HANDLE_SLOT (gchandle);
GCHandleType type = MONO_GC_HANDLE_TYPE (gchandle);
HandleData *handles = gc_handles_for_type (type);
+ volatile gpointer *slot;
+ gpointer entry;
if (!handles)
return;
- guint bucket, offset;
- gpointer slot;
- bucketize (index, &bucket, &offset);
- slot = handles->entries [bucket] [offset];
- if (index < handles->capacity && MONO_GC_HANDLE_OCCUPIED (slot)) {
- handles->entries [bucket] [offset] = NULL;
- protocol_gchandle_update (handles->type, (gpointer)&handles->entries [bucket] [offset], slot, NULL);
+
+ slot = sgen_array_list_get_slot (&handles->entries_array, index);
+ entry = *slot;
+
+ if (index < handles->entries_array.capacity && MONO_GC_HANDLE_OCCUPIED (entry)) {
+ *slot = NULL;
+ protocol_gchandle_update (handles->type, (gpointer)slot, entry, NULL);
HEAVY_STAT (InterlockedDecrement ((volatile gint32 *)&stat_gc_handles_allocated));
} else {
/* print a warning? */
unsigned char *free_chunk_map;
};
+/* We allow read only access on the list while sweep is not running */
LOSObject *los_object_list = NULL;
mword los_memory_usage = 0;
*vtslot = vtable;
sgen_update_heap_boundaries ((mword)obj->data, (mword)obj->data + size);
obj->next = los_object_list;
+ /*
+ * We need a memory barrier so we don't expose as head of the los object list
+ * a LOSObject that doesn't have its fields initialized.
+ */
+ mono_memory_write_barrier ();
los_object_list = obj;
los_memory_usage += size;
los_num_objects++;
}
void
-sgen_los_scan_card_table (gboolean mod_union, ScanCopyContext ctx)
+sgen_los_scan_card_table (CardTableScanType scan_type, ScanCopyContext ctx)
{
LOSObject *obj;
- binary_protocol_los_card_table_scan_start (sgen_timestamp (), mod_union);
+ binary_protocol_los_card_table_scan_start (sgen_timestamp (), scan_type & CARDTABLE_SCAN_MOD_UNION);
for (obj = los_object_list; obj; obj = obj->next) {
+ mword num_cards = 0;
guint8 *cards;
if (!SGEN_OBJECT_HAS_REFERENCES (obj->data))
continue;
- if (mod_union) {
+ if (scan_type & CARDTABLE_SCAN_MOD_UNION) {
if (!sgen_los_object_is_pinned (obj->data))
continue;
cards = get_cardtable_mod_union_for_object (obj);
g_assert (cards);
+ if (scan_type == CARDTABLE_SCAN_MOD_UNION_PRECLEAN) {
+ guint8 *cards_preclean;
+ mword obj_size = sgen_los_object_size (obj);
+ num_cards = sgen_card_table_number_of_cards_in_range ((mword) obj->data, obj_size);
+ cards_preclean = (guint8 *)sgen_alloc_internal_dynamic (num_cards, INTERNAL_MEM_CARDTABLE_MOD_UNION, TRUE);
+
+ sgen_card_table_preclean_mod_union (cards, cards_preclean, num_cards);
+
+ cards = cards_preclean;
+ }
} else {
cards = NULL;
}
- sgen_cardtable_scan_object (obj->data, sgen_los_object_size (obj), cards, mod_union, ctx);
+ sgen_cardtable_scan_object (obj->data, sgen_los_object_size (obj), cards, ctx);
+
+ if (scan_type == CARDTABLE_SCAN_MOD_UNION_PRECLEAN)
+ sgen_free_internal_dynamic (cards, num_cards, INTERNAL_MEM_CARDTABLE_MOD_UNION);
}
- binary_protocol_los_card_table_scan_end (sgen_timestamp (), mod_union);
+ binary_protocol_los_card_table_scan_end (sgen_timestamp (), scan_type & CARDTABLE_SCAN_MOD_UNION);
}
void
#include "sgen-scan-object.h"
}
+#ifdef SCAN_VTYPE_FUNCTION_NAME
+static void
+SCAN_VTYPE_FUNCTION_NAME (GCObject *full_object, char *start, SgenDescriptor desc, SgenGrayQueue *queue BINARY_PROTOCOL_ARG (size_t size))
+{
+ SGEN_OBJECT_LAYOUT_STATISTICS_DECLARE_BITMAP;
+
+#ifdef HEAVY_STATISTICS
+ /* FIXME: We're half scanning this object. How do we account for that? */
+ //add_scanned_object (start);
+#endif
+
+ /* The descriptors include info about the object header as well */
+ start -= SGEN_CLIENT_OBJECT_HEADER_SIZE;
+
+ /* We use the same HANDLE_PTR from the obj scan function */
+#define SCAN_OBJECT_NOVTABLE
+#define SCAN_OBJECT_PROTOCOL
+#include "sgen-scan-object.h"
+
+ SGEN_OBJECT_LAYOUT_STATISTICS_COMMIT_BITMAP;
+}
+#endif
+
+#ifdef SCAN_PTR_FIELD_FUNCTION_NAME
+/*
+ * Scan a single pointer field of full_object. Uses the same HANDLE_PTR
+ * macro as the object scan function that includes this file, so copy/mark
+ * and remset behavior stays consistent with whole-object scanning.
+ */
+static void
+SCAN_PTR_FIELD_FUNCTION_NAME (GCObject *full_object, GCObject **ptr, SgenGrayQueue *queue)
+{
+	HANDLE_PTR (ptr, NULL);
+}
+#endif
+
static gboolean
DRAIN_GRAY_STACK_FUNCTION_NAME (SgenGrayQueue *queue)
{
#undef COPY_OR_MARK_FUNCTION_NAME
#undef COPY_OR_MARK_WITH_EVACUATION
+#undef COPY_OR_MARK_CONCURRENT
+#undef COPY_OR_MARK_CONCURRENT_WITH_EVACUATION
#undef SCAN_OBJECT_FUNCTION_NAME
+#undef SCAN_VTYPE_FUNCTION_NAME
+#undef SCAN_PTR_FIELD_FUNCTION_NAME
#undef DRAIN_GRAY_STACK_FUNCTION_NAME
+++ /dev/null
-/*
- * sgen-major-scan-object.h: Object scanning in the major collectors.
- *
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/*
- * FIXME: We use the same scanning function in the concurrent collector whether we scan
- * during the starting/finishing collection pause (with the world stopped) or from the
- * concurrent worker thread.
- *
- * As long as the world is stopped, we should just follow pointers into the nursery and
- * evict if possible. In that case we also don't need the ALWAYS_ADD_TO_GLOBAL_REMSET case,
- * which only seems to make sense for when the world is stopped, in which case we only need
- * it because we don't follow into the nursery.
- */
-
-#undef HANDLE_PTR
-#define HANDLE_PTR(ptr,obj) do { \
- GCObject *__old = *(ptr); \
- binary_protocol_scan_process_reference ((full_object), (ptr), __old); \
- if (__old) { \
- gboolean __still_in_nursery = major_copy_or_mark_object_with_evacuation ((ptr), __old, queue); \
- if (G_UNLIKELY (__still_in_nursery && !sgen_ptr_in_nursery ((ptr)) && !SGEN_OBJECT_IS_CEMENTED (*(ptr)))) { \
- GCObject *__copy = *(ptr); \
- sgen_add_to_global_remset ((ptr), __copy); \
- } \
- } \
- } while (0)
-
-
-static void
-major_scan_vtype_concurrent_finish (GCObject *full_object, char *start, SgenDescriptor desc, SgenGrayQueue *queue BINARY_PROTOCOL_ARG (size_t size))
-{
- SGEN_OBJECT_LAYOUT_STATISTICS_DECLARE_BITMAP;
-
-#ifdef HEAVY_STATISTICS
- /* FIXME: We're half scanning this object. How do we account for that? */
- //add_scanned_object (start);
-#endif
-
- /* The descriptors include info about the object header as well */
- start -= SGEN_CLIENT_OBJECT_HEADER_SIZE;
-
-#define SCAN_OBJECT_NOVTABLE
-#define SCAN_OBJECT_PROTOCOL
-#include "sgen-scan-object.h"
-
- SGEN_OBJECT_LAYOUT_STATISTICS_COMMIT_BITMAP;
-}
#include "mono/sgen/sgen-memory-governor.h"
#include "mono/sgen/sgen-layout-stats.h"
#include "mono/sgen/sgen-pointer-queue.h"
+#include "mono/sgen/sgen-array-list.h"
#include "mono/sgen/sgen-pinning.h"
#include "mono/sgen/sgen-workers.h"
#include "mono/sgen/sgen-thread-pool.h"
#include "mono/sgen/sgen-client.h"
-#include "mono/utils/mono-membar.h"
+#include "mono/utils/mono-memory-model.h"
#if defined(ARCH_MIN_MS_BLOCK_SIZE) && defined(ARCH_MIN_MS_BLOCK_SIZE_SHIFT)
#define MS_BLOCK_SIZE ARCH_MIN_MS_BLOCK_SIZE
#define BLOCK_TAG(bl) ((bl)->has_references ? BLOCK_TAG_HAS_REFERENCES ((bl)) : (bl))
/* all allocated blocks in the system */
-static SgenPointerQueue allocated_blocks;
+static SgenArrayList allocated_blocks = SGEN_ARRAY_LIST_INIT (NULL, NULL, NULL, INTERNAL_MEM_PIN_QUEUE);
/* non-allocated block free-list */
static void *empty_blocks = NULL;
static size_t num_empty_blocks = 0;
-#define FOREACH_BLOCK_NO_LOCK_CONDITION(cond,bl) { \
- size_t __index; \
- SGEN_ASSERT (0, (cond) && !sweep_in_progress (), "Can't iterate blocks while the world is running or sweep is in progress."); \
- for (__index = 0; __index < allocated_blocks.next_slot; ++__index) { \
- (bl) = BLOCK_UNTAG (allocated_blocks.data [__index]);
-#define FOREACH_BLOCK_NO_LOCK(bl) \
- FOREACH_BLOCK_NO_LOCK_CONDITION(sgen_is_world_stopped (), bl)
+#define FOREACH_BLOCK_NO_LOCK(bl) { \
+ volatile gpointer *slot; \
+ SGEN_ASSERT (0, !sweep_in_progress (), "Can't iterate blocks while sweep is in progress."); \
+ SGEN_ARRAY_LIST_FOREACH_SLOT (&allocated_blocks, slot) { \
+ (bl) = BLOCK_UNTAG (*slot);
#define FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK(bl,hr) { \
- size_t __index; \
- SGEN_ASSERT (0, sgen_is_world_stopped () && !sweep_in_progress (), "Can't iterate blocks while the world is running or sweep is in progress."); \
- for (__index = 0; __index < allocated_blocks.next_slot; ++__index) { \
- (bl) = (MSBlockInfo *)allocated_blocks.data [__index]; \
+ volatile gpointer *slot; \
+ SGEN_ASSERT (0, !sweep_in_progress (), "Can't iterate blocks while sweep is in progress."); \
+ SGEN_ARRAY_LIST_FOREACH_SLOT (&allocated_blocks, slot) { \
+ (bl) = (MSBlockInfo *) (*slot); \
(hr) = BLOCK_IS_TAGGED_HAS_REFERENCES ((bl)); \
(bl) = BLOCK_UNTAG ((bl));
-#define END_FOREACH_BLOCK_NO_LOCK } }
+#define END_FOREACH_BLOCK_NO_LOCK } SGEN_ARRAY_LIST_END_FOREACH_SLOT; }
static volatile size_t num_major_sections = 0;
/*
g_assert (block->free_list);
/* the block must be in the allocated_blocks array */
- g_assert (sgen_pointer_queue_find (&allocated_blocks, BLOCK_TAG (block)) != (size_t)-1);
+ g_assert (sgen_array_list_find (&allocated_blocks, BLOCK_TAG (block)) != (guint32)-1);
}
}
major_finish_sweep_checking ();
mono_memory_barrier ();
- sgen_pointer_queue_add (&allocated_blocks, BLOCK_TAG (info));
+ sgen_array_list_add (&allocated_blocks, BLOCK_TAG (info), 0, FALSE);
SGEN_ATOMIC_ADD_P (num_major_sections, 1);
return TRUE;
SGEN_ASSERT (0, success, "Could not set sweep state.");
}
-static gboolean ensure_block_is_checked_for_sweeping (int block_index, gboolean wait, gboolean *have_checked);
+static gboolean ensure_block_is_checked_for_sweeping (guint32 block_index, gboolean wait, gboolean *have_checked);
static SgenThreadPoolJob * volatile sweep_job;
static void
major_finish_sweep_checking (void)
{
- int block_index;
+ guint32 block_index;
SgenThreadPoolJob *job;
retry:
} else {
sgen_los_mark_mod_union_card (obj, ptr);
}
-
binary_protocol_mod_union_remset (obj, ptr, value_obj, SGEN_LOAD_VTABLE (value_obj));
}
#define COPY_OR_MARK_WITH_EVACUATION
#define COPY_OR_MARK_FUNCTION_NAME major_copy_or_mark_object_with_evacuation
#define SCAN_OBJECT_FUNCTION_NAME major_scan_object_with_evacuation
+#define SCAN_VTYPE_FUNCTION_NAME major_scan_vtype_with_evacuation
#define DRAIN_GRAY_STACK_FUNCTION_NAME drain_gray_stack_with_evacuation
+#define SCAN_PTR_FIELD_FUNCTION_NAME major_scan_ptr_field_with_evacuation
#include "sgen-marksweep-drain-gray-stack.h"
-#undef COPY_OR_MARK_WITH_EVACUATION
#define COPY_OR_MARK_CONCURRENT
#define COPY_OR_MARK_FUNCTION_NAME major_copy_or_mark_object_concurrent_no_evacuation
#define SCAN_OBJECT_FUNCTION_NAME major_scan_object_concurrent_no_evacuation
#define DRAIN_GRAY_STACK_FUNCTION_NAME drain_gray_stack_concurrent_no_evacuation
#include "sgen-marksweep-drain-gray-stack.h"
-#undef COPY_OR_MARK_CONCURRENT
#define COPY_OR_MARK_CONCURRENT_WITH_EVACUATION
#define COPY_OR_MARK_FUNCTION_NAME major_copy_or_mark_object_concurrent_with_evacuation
#define SCAN_OBJECT_FUNCTION_NAME major_scan_object_concurrent_with_evacuation
+#define SCAN_VTYPE_FUNCTION_NAME major_scan_vtype_concurrent_with_evacuation
+#define SCAN_PTR_FIELD_FUNCTION_NAME major_scan_ptr_field_concurrent_with_evacuation
#define DRAIN_GRAY_STACK_FUNCTION_NAME drain_gray_stack_concurrent_with_evacuation
#include "sgen-marksweep-drain-gray-stack.h"
return drain_gray_stack_concurrent_no_evacuation (queue);
}
-#include "sgen-marksweep-scan-object-concurrent.h"
-
static void
major_copy_or_mark_object_canonical (GCObject **ptr, SgenGrayQueue *queue)
{
* be correct, i.e. must not be used.
*/
static gboolean
-ensure_block_is_checked_for_sweeping (int block_index, gboolean wait, gboolean *have_checked)
+ensure_block_is_checked_for_sweeping (guint32 block_index, gboolean wait, gboolean *have_checked)
{
int count;
gboolean have_live = FALSE;
int i;
void *tagged_block;
MSBlockInfo *block;
+ volatile gpointer *block_slot = sgen_array_list_get_slot (&allocated_blocks, block_index);
SGEN_ASSERT (6, sweep_in_progress (), "Why do we call this function if there's no sweep in progress?");
*have_checked = FALSE;
retry:
- tagged_block = *(void * volatile *)&allocated_blocks.data [block_index];
+ tagged_block = *(void * volatile *)block_slot;
if (!tagged_block)
return FALSE;
goto retry;
}
- if (SGEN_CAS_PTR (&allocated_blocks.data [block_index], BLOCK_TAG_CHECKING (tagged_block), tagged_block) != tagged_block)
+ if (SGEN_CAS_PTR (block_slot, BLOCK_TAG_CHECKING (tagged_block), tagged_block) != tagged_block)
goto retry;
block = BLOCK_UNTAG (tagged_block);
* block list and freed.
*/
SGEN_ASSERT (6, block_index < allocated_blocks.next_slot, "How did the number of blocks shrink?");
- SGEN_ASSERT (6, allocated_blocks.data [block_index] == BLOCK_TAG_CHECKING (tagged_block), "How did the block move?");
+ SGEN_ASSERT (6, *block_slot == BLOCK_TAG_CHECKING (tagged_block), "How did the block move?");
binary_protocol_empty (MS_BLOCK_OBJ (block, 0), (char*)MS_BLOCK_OBJ (block, count) - (char*)MS_BLOCK_OBJ (block, 0));
ms_free_block (block);
}
done:
- allocated_blocks.data [block_index] = tagged_block;
+ *block_slot = tagged_block;
return !!tagged_block;
}
static void
sweep_job_func (void *thread_data_untyped, SgenThreadPoolJob *job)
{
- int block_index;
- int num_blocks = num_major_sections_before_sweep;
+ guint32 block_index;
+ guint32 num_blocks = num_major_sections_before_sweep;
SGEN_ASSERT (0, sweep_in_progress (), "Sweep thread called with wrong state");
SGEN_ASSERT (0, num_blocks <= allocated_blocks.next_slot, "How did we lose blocks?");
* cooperate with the sweep thread to finish sweeping, and they will traverse from
* low to high, to avoid constantly colliding on the same blocks.
*/
- for (block_index = num_blocks - 1; block_index >= 0; --block_index) {
- gboolean have_checked;
-
+ for (block_index = num_blocks; block_index-- > 0;) {
+ /*
+ * have_checked is unused here, but it must not be NULL:
+ * ensure_block_is_checked_for_sweeping dereferences its third
+ * argument unconditionally (*have_checked = FALSE).
+ */
+ gboolean have_checked;
+
/*
 * The block might have been freed by another thread doing some checking
 * work.
 */
if (!ensure_block_is_checked_for_sweeping (block_index, TRUE, &have_checked))
++num_major_sections_freed_in_sweep;
}
if (SGEN_MAX_ASSERT_LEVEL >= 6) {
for (block_index = num_blocks; block_index < allocated_blocks.next_slot; ++block_index) {
- MSBlockInfo *block = BLOCK_UNTAG (allocated_blocks.data [block_index]);
+ MSBlockInfo *block = BLOCK_UNTAG (*sgen_array_list_get_slot (&allocated_blocks, block_index));
SGEN_ASSERT (6, block && block->state == BLOCK_STATE_SWEPT, "How did a new block to be swept get added while swept?");
}
}
- sgen_pointer_queue_remove_nulls (&allocated_blocks);
- mono_memory_barrier ();
+ sgen_array_list_remove_nulls (&allocated_blocks);
sweep_finish ();
*/
major_finish_sweep_checking ();
- FOREACH_BLOCK_NO_LOCK_CONDITION (TRUE, block) {
+ FOREACH_BLOCK_NO_LOCK (block) {
int count = MS_BLOCK_FREE / block->obj_size;
void **iter;
size += count * block->obj_size;
#define MS_OBJ_ALLOCED_FAST(o,b) (*(void**)(o) && (*(char**)(o) < (b) || *(char**)(o) >= (b) + MS_BLOCK_SIZE))
static void
-scan_card_table_for_block (MSBlockInfo *block, gboolean mod_union, ScanCopyContext ctx)
+scan_card_table_for_block (MSBlockInfo *block, CardTableScanType scan_type, ScanCopyContext ctx)
{
SgenGrayQueue *queue = ctx.queue;
ScanObjectFunc scan_func = ctx.ops->scan_object;
#ifndef SGEN_HAVE_OVERLAPPING_CARDS
guint8 cards_copy [CARDS_PER_BLOCK];
#endif
+ guint8 cards_preclean [CARDS_PER_BLOCK];
gboolean small_objects;
int block_obj_size;
char *block_start;
guint8 *card_data_end;
char *scan_front = NULL;
+ /* The concurrent mark doesn't enter evacuating blocks */
+ if (scan_type == CARDTABLE_SCAN_MOD_UNION_PRECLEAN && major_block_is_evacuating (block))
+ return;
+
block_obj_size = block->obj_size;
small_objects = block_obj_size < CARD_SIZE_IN_BYTES;
* Cards aliasing happens in powers of two, so as long as major blocks are aligned to their
* sizes, they won't overflow the cardtable overlap modulus.
*/
- if (mod_union) {
+ if (scan_type & CARDTABLE_SCAN_MOD_UNION) {
card_data = card_base = block->cardtable_mod_union;
/*
* This happens when the nursery collection that precedes finishing
*/
if (!card_data)
return;
+
+ if (scan_type == CARDTABLE_SCAN_MOD_UNION_PRECLEAN) {
+ sgen_card_table_preclean_mod_union (card_data, cards_preclean, CARDS_PER_BLOCK);
+ card_data = card_base = cards_preclean;
+ }
} else {
#ifdef SGEN_HAVE_OVERLAPPING_CARDS
card_data = card_base = sgen_card_table_get_card_scan_address ((mword)block_start);
if (obj < scan_front || !MS_OBJ_ALLOCED_FAST (obj, block_start))
goto next_object;
- if (mod_union) {
+ if (scan_type & CARDTABLE_SCAN_MOD_UNION) {
/* FIXME: do this more efficiently */
int w, b;
MS_CALC_MARK_BIT (w, b, obj);
scan_func (object, sgen_obj_get_descriptor (object), queue);
} else {
size_t offset = sgen_card_table_get_card_offset (obj, block_start);
- sgen_cardtable_scan_object (object, block_obj_size, card_base + offset, mod_union, ctx);
+ sgen_cardtable_scan_object (object, block_obj_size, card_base + offset, ctx);
}
next_object:
obj += block_obj_size;
}
static void
-major_scan_card_table (gboolean mod_union, ScanCopyContext ctx)
+major_scan_card_table (CardTableScanType scan_type, ScanCopyContext ctx)
{
MSBlockInfo *block;
gboolean has_references;
if (!concurrent_mark)
- g_assert (!mod_union);
+ g_assert (scan_type == CARDTABLE_SCAN_GLOBAL);
major_finish_sweep_checking ();
- binary_protocol_major_card_table_scan_start (sgen_timestamp (), mod_union);
+ binary_protocol_major_card_table_scan_start (sgen_timestamp (), scan_type & CARDTABLE_SCAN_MOD_UNION);
FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK (block, has_references) {
#ifdef PREFETCH_CARDS
int prefetch_index = __index + 6;
if (prefetch_index < allocated_blocks.next_slot) {
- MSBlockInfo *prefetch_block = BLOCK_UNTAG (allocated_blocks.data [prefetch_index]);
- guint8 *prefetch_cards = sgen_card_table_get_card_scan_address ((mword)MS_BLOCK_FOR_BLOCK_INFO (prefetch_block));
+ MSBlockInfo *prefetch_block = BLOCK_UNTAG (*sgen_array_list_get_slot (&allocated_blocks, prefetch_index));
PREFETCH_READ (prefetch_block);
- PREFETCH_WRITE (prefetch_cards);
- PREFETCH_WRITE (prefetch_cards + 32);
+ if (scan_type == CARDTABLE_SCAN_GLOBAL) {
+ guint8 *prefetch_cards = sgen_card_table_get_card_scan_address ((mword)MS_BLOCK_FOR_BLOCK_INFO (prefetch_block));
+ PREFETCH_WRITE (prefetch_cards);
+ PREFETCH_WRITE (prefetch_cards + 32);
+ }
}
#endif
if (!has_references)
continue;
- scan_card_table_for_block (block, mod_union, ctx);
+ scan_card_table_for_block (block, scan_type, ctx);
} END_FOREACH_BLOCK_NO_LOCK;
- binary_protocol_major_card_table_scan_end (sgen_timestamp (), mod_union);
+ binary_protocol_major_card_table_scan_end (sgen_timestamp (), scan_type & CARDTABLE_SCAN_MOD_UNION);
}
static void
if (is_concurrent) {
collector->major_ops_concurrent_start.copy_or_mark_object = major_copy_or_mark_object_concurrent_canonical;
collector->major_ops_concurrent_start.scan_object = major_scan_object_concurrent_with_evacuation;
+ collector->major_ops_concurrent_start.scan_vtype = major_scan_vtype_concurrent_with_evacuation;
+ collector->major_ops_concurrent_start.scan_ptr_field = major_scan_ptr_field_concurrent_with_evacuation;
collector->major_ops_concurrent_start.drain_gray_stack = drain_gray_stack_concurrent;
collector->major_ops_concurrent_finish.copy_or_mark_object = major_copy_or_mark_object_concurrent_finish_canonical;
collector->major_ops_concurrent_finish.scan_object = major_scan_object_with_evacuation;
- collector->major_ops_concurrent_finish.scan_vtype = major_scan_vtype_concurrent_finish;
+ collector->major_ops_concurrent_finish.scan_vtype = major_scan_vtype_with_evacuation;
+ collector->major_ops_concurrent_finish.scan_ptr_field = major_scan_ptr_field_with_evacuation;
collector->major_ops_concurrent_finish.drain_gray_stack = drain_gray_stack;
}
#include "sgen-scan-object.h"
}
+/* Serial scan of a single pointer field of full_object, via the HANDLE_PTR macro provided by the including file. */
+static void
+SERIAL_SCAN_PTR_FIELD (GCObject *full_object, GCObject **ptr, SgenGrayQueue *queue)
+{
+	HANDLE_PTR (ptr, NULL);
+}
+
#define FILL_MINOR_COLLECTOR_SCAN_OBJECT(collector) do { \
(collector)->serial_ops.scan_object = SERIAL_SCAN_OBJECT; \
(collector)->serial_ops.scan_vtype = SERIAL_SCAN_VTYPE; \
+ (collector)->serial_ops.scan_ptr_field = SERIAL_SCAN_PTR_FIELD; \
} while (0)
static volatile State workers_state;
static SgenObjectOperations * volatile idle_func_object_ops;
+static SgenThreadPoolJob * volatile preclean_job;
static guint64 stat_workers_num_finished;
sgen_drain_gray_stack (ctx);
} else {
- worker_try_finish ();
+ SgenThreadPoolJob *job = preclean_job;
+ if (job) {
+ sgen_thread_pool_job_enqueue (job);
+ preclean_job = NULL;
+ } else {
+ worker_try_finish ();
+ }
}
}
void
sgen_workers_stop_all_workers (void)
{
+ preclean_job = NULL;
+ mono_memory_write_barrier ();
forced_stop = TRUE;
sgen_thread_pool_wait_for_all_jobs ();
}
void
-sgen_workers_start_all_workers (SgenObjectOperations *object_ops)
+sgen_workers_start_all_workers (SgenObjectOperations *object_ops, SgenThreadPoolJob *job)
{
forced_stop = FALSE;
idle_func_object_ops = object_ops;
+ preclean_job = job;
mono_memory_write_barrier ();
sgen_workers_ensure_awake ();
void sgen_workers_init (int num_workers);
void sgen_workers_stop_all_workers (void);
-void sgen_workers_start_all_workers (SgenObjectOperations *object_ops);
+void sgen_workers_start_all_workers (SgenObjectOperations *object_ops, SgenThreadPoolJob *finish_job);
void sgen_workers_ensure_awake (void);
void sgen_workers_init_distribute_gray_queue (void);
void sgen_workers_enqueue_job (SgenThreadPoolJob *job, gboolean enqueue);
#define mono_atomic_load_release(_type,target) ({ \
_type __tmp; \
LOAD_RELEASE_FENCE; \
- __tmp = *target; \
+ __tmp = *(target); \
__tmp; })
#define mono_atomic_load_acquire(var,_type,target) do { \
- _type __tmp = *target; \
+ _type __tmp = *(target); \
LOAD_ACQUIRE_FENCE; \
(var) = __tmp; \
} while (0)
#define mono_atomic_store_acquire(target,value) { \
- *target = value; \
+ *(target) = (value); \
STORE_ACQUIRE_FENCE; \
}
<ClCompile Include="..\mono\sgen\sgen-pinning-stats.c" />\r
<ClCompile Include="..\mono\sgen\sgen-pinning.c" />\r
<ClCompile Include="..\mono\sgen\sgen-pointer-queue.c" />\r
+ <ClCompile Include="..\mono\sgen\sgen-array-list.c" />\r
<ClCompile Include="..\mono\sgen\sgen-protocol.c" />\r
<ClCompile Include="..\mono\sgen\sgen-qsort.c" />\r
<ClCompile Include="..\mono\sgen\sgen-simple-nursery.c" />\r
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\r
<ImportGroup Label="ExtensionTargets">\r
</ImportGroup>\r
-</Project>
\ No newline at end of file
+</Project>\r