-/*
- * sgen-gray.c: Gray queue management.
+/**
+ * \file
+ * Gray queue management.
*
* Copyright 2001-2003 Ximian, Inc
* Copyright 2003-2010 Novell, Inc.
* Copyright (C) 2012 Xamarin Inc
*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "config.h"
#ifdef HAVE_SGEN_GC
#define STATE_ASSERT(s,v)
#endif
+/*
+ * Whenever we dispose of a gray queue, we save its free list. Then, in the next collection,
+ * we reuse that free list for the new gray queue.
+ */
+static GrayQueueSection *last_gray_queue_free_list;
+
void
-sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue)
+sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue, gboolean is_parallel)
{
GrayQueueSection *section;
- HEAVY_STAT (stat_gray_queue_section_alloc ++);
-
- if (queue->alloc_prepare_func)
- queue->alloc_prepare_func (queue);
-
if (queue->free_list) {
/* Use the previously allocated queue sections if possible */
section = queue->free_list;
queue->free_list = section->next;
STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
} else {
+ HEAVY_STAT (stat_gray_queue_section_alloc ++);
+
/* Allocate a new section */
- section = sgen_alloc_internal (INTERNAL_MEM_GRAY_QUEUE);
+ section = (GrayQueueSection *)sgen_alloc_internal (INTERNAL_MEM_GRAY_QUEUE);
STATE_SET (section, GRAY_QUEUE_SECTION_STATE_FLOATING);
}
- section->size = SGEN_GRAY_QUEUE_SECTION_SIZE;
+ /* Section is empty */
+ section->size = 0;
STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
/* Link it with the others */
section->next = queue->first;
+ section->prev = NULL;
+ if (queue->first)
+ queue->first->prev = section;
+ else
+ queue->last = section;
queue->first = section;
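+ /* The cursor points at the last occupied entry, so entries - 1 marks a section with no entries. */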
queue->cursor = section->entries - 1;
+
+ if (is_parallel) {
+ mono_memory_write_barrier ();
+ /*
+ * FIXME
+ * We could probably optimize the code to rely only on the write barrier
+ * for synchronization with the stealer thread. Additionally, we could issue
+ * the write barrier just once every other gray queue change, and require a
+ * minimum number of sections before stealing, to preserve consistency.
+ */
+ InterlockedIncrement (&queue->num_sections);
+ } else {
+ queue->num_sections++;
+ }
}
void
*/
void
-sgen_gray_object_enqueue (SgenGrayQueue *queue, GCObject *obj, SgenDescriptor desc)
+sgen_gray_object_enqueue (SgenGrayQueue *queue, GCObject *obj, SgenDescriptor desc, gboolean is_parallel)
{
GrayQueueEntry entry = SGEN_GRAY_QUEUE_ENTRY (obj, desc);
if (G_UNLIKELY (!queue->first || queue->cursor == GRAY_LAST_CURSOR_POSITION (queue->first))) {
if (queue->first) {
- /* Set the current section size back to default, might have been changed by sgen_gray_object_dequeue_section */
+ /*
+ * We don't actively update the section size with each push/pop. For the first
+ * section we determine the size from the cursor position; for the rest of the
+ * sections the size must be set explicitly.
+ */
queue->first->size = SGEN_GRAY_QUEUE_SECTION_SIZE;
}
- sgen_gray_object_alloc_queue_section (queue);
+ sgen_gray_object_alloc_queue_section (queue, is_parallel);
}
STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
SGEN_ASSERT (9, queue->cursor <= GRAY_LAST_CURSOR_POSITION (queue->first), "gray queue %p overflow, first %p, cursor %p", queue, queue->first, queue->cursor);
#endif
}
+/*
+ * We attempt to spread the objects in the gray queue across a number
+ * of sections. If the queue already has at least that many sections, it is
+ * already spread; if it doesn't have enough sections, we allocate as many
+ * as we can.
+ */
+void
+sgen_gray_object_spread (SgenGrayQueue *queue, int num_sections)
+{
+ GrayQueueSection *section_start, *section_end;
+ int total_entries = 0, num_entries_per_section;
+ int num_sections_final;
+
+ if (queue->num_sections >= num_sections)
+ return;
+
+ if (!queue->first)
+ return;
+
+ /* Compute number of elements in the gray queue */
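+ /* The first section's size is only implicit in the cursor position; materialize it before counting. */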
+ queue->first->size = queue->cursor - queue->first->entries + 1;
+ total_entries = queue->first->size;
+ for (section_start = queue->first->next; section_start != NULL; section_start = section_start->next) {
+ SGEN_ASSERT (0, section_start->size == SGEN_GRAY_QUEUE_SECTION_SIZE, "We expect all sections aside from the first one to be full");
+ total_entries += section_start->size;
+ }
+
+ /* Compute how many sections we should have and elements per section */
+ num_sections_final = (total_entries > num_sections) ? num_sections : total_entries;
+ num_entries_per_section = total_entries / num_sections_final;
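+ /* E.g. spreading 70 entries across 4 requested sections gives 70 / 4 = 17 entries per section; the remainder stays in the tail, ending at 17, 17, 17, 19. */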
+
+ /* Allocate all needed sections */
+ while (queue->num_sections < num_sections_final)
+ sgen_gray_object_alloc_queue_section (queue, TRUE);
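+ /* Newly allocated sections are empty (size 0) and are prepended at the head of the queue. */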
+
+ /* Spread out the elements in the sections. By design, sections at the end are fuller. */
+ section_start = queue->first;
+ section_end = queue->last;
+ while (section_start != section_end) {
+ /* We move entries from end to start, until they meet */
+ while (section_start->size < num_entries_per_section) {
+ GrayQueueEntry entry;
+ if (section_end->size <= num_entries_per_section) {
+ section_end = section_end->prev;
+ if (section_end == section_start)
+ break;
+ }
+ if (section_end->size <= num_entries_per_section)
+ break;
+
+ section_end->size--;
+ entry = section_end->entries [section_end->size];
+ section_start->entries [section_start->size] = entry;
+ section_start->size++;
+ }
+ section_start = section_start->next;
+ }
+
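+ /* Re-point the cursor at the last entry of the (possibly shrunken) first section. */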
+ queue->cursor = queue->first->entries + queue->first->size - 1;
+ queue->num_sections = num_sections_final;
+}
+
GrayQueueEntry
-sgen_gray_object_dequeue (SgenGrayQueue *queue)
+sgen_gray_object_dequeue (SgenGrayQueue *queue, gboolean is_parallel)
{
GrayQueueEntry entry;
#endif
if (G_UNLIKELY (queue->cursor < GRAY_FIRST_CURSOR_POSITION (queue->first))) {
- GrayQueueSection *section = queue->first;
+ GrayQueueSection *section;
+ gint32 old_num_sections = 0;
+
+ if (is_parallel)
+ old_num_sections = InterlockedDecrement (&queue->num_sections);
+ else
+ queue->num_sections--;
+
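+ /* No spare sections remain: a stealer may be racing us for this very section, so serialize on the steal mutex. */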
+ if (is_parallel && old_num_sections <= 0) {
+ mono_os_mutex_lock (&queue->steal_mutex);
+ }
+
+ section = queue->first;
queue->first = section->next;
+ if (queue->first) {
+ queue->first->prev = NULL;
+ } else {
+ queue->last = NULL;
+ SGEN_ASSERT (0, !old_num_sections, "Why do we have an inconsistent number of sections?");
+ }
section->next = queue->free_list;
STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FREE_LIST);
queue->free_list = section;
queue->cursor = queue->first ? queue->first->entries + queue->first->size - 1 : NULL;
+
+ if (is_parallel && old_num_sections <= 0) {
+ mono_os_mutex_unlock (&queue->steal_mutex);
+ }
}
return entry;
if (!queue->first)
return NULL;
+ /* We never steal from this queue */
+ queue->num_sections--;
+
section = queue->first;
queue->first = section->next;
+ if (queue->first)
+ queue->first->prev = NULL;
+ else
+ queue->last = NULL;
section->next = NULL;
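+ /* The first section's size is only implicit in the cursor; set it before handing the section out. */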
section->size = queue->cursor - section->entries + 1;
return section;
}
+GrayQueueSection*
+sgen_gray_object_steal_section (SgenGrayQueue *queue)
+{
+ gint32 sections_remaining;
+ GrayQueueSection *section = NULL;
+
+ /*
+ * With each section push/pop to/from the queue we increment or decrement
+ * the number of sections. There is only one thread accessing the top of the
+ * queue (the owner) and potentially multiple workers trying to steal
+ * sections from the bottom, so the stealers need to lock. A num_sections
+ * decrement from the owner means that the first section is reserved, while
+ * a decrement by a stealer means that the last section is reserved. If,
+ * after we decrement num_sections, at least one more section is present, we
+ * can't race with the other thread. If this is not the case, the stealing
+ * end abandons its pop and restores num_sections, while the owner end takes
+ * a lock to make sure it is not racing with a stealer (the stealer might
+ * have popped a section and be in the process of unlinking the very section
+ * that the owner is trying to pop).
+ */
+
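+ /* Never steal the queue's only section; the owner always keeps at least one. */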
+ if (queue->num_sections <= 1)
+ return NULL;
+
+ /* Give up if there is contention on the last section */
+ if (mono_os_mutex_trylock (&queue->steal_mutex) != 0)
+ return NULL;
+
+ sections_remaining = InterlockedDecrement (&queue->num_sections);
+ if (sections_remaining <= 0) {
+ /* The section that we tried to steal might be the head of the queue. */
+ InterlockedIncrement (&queue->num_sections);
+ } else {
+ /* We have reserved for us the tail section of the queue */
+ section = queue->last;
+ SGEN_ASSERT (0, section, "Why don't we have any sections to steal?");
+ SGEN_ASSERT (0, !section->next, "Why aren't we stealing the tail?");
+ queue->last = section->prev;
+ section->prev = NULL;
+ SGEN_ASSERT (0, queue->last, "Why did we steal the only remaining section?");
+ queue->last->next = NULL;
+
+ STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);
+ }
+
+ mono_os_mutex_unlock (&queue->steal_mutex);
+ return section;
+}
+
void
-sgen_gray_object_enqueue_section (SgenGrayQueue *queue, GrayQueueSection *section)
+sgen_gray_object_enqueue_section (SgenGrayQueue *queue, GrayQueueSection *section, gboolean is_parallel)
{
STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
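+ /* Record the current first section's fill level before pushing the new section ahead of it. */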
queue->first->size = queue->cursor - queue->first->entries + 1;
section->next = queue->first;
+ section->prev = NULL;
+ if (queue->first)
+ queue->first->prev = section;
+ else
+ queue->last = section;
queue->first = section;
queue->cursor = queue->first->entries + queue->first->size - 1;
#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
queue->enqueue_check_func (section->entries [i].obj);
}
#endif
+ if (is_parallel) {
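+ /* Make the freshly linked section visible to stealers before publishing the new count. */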
+ mono_memory_write_barrier ();
+ InterlockedIncrement (&queue->num_sections);
+ } else {
+ queue->num_sections++;
+ }
}
void
}
void
-sgen_gray_object_queue_init (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func)
+sgen_gray_object_queue_init (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func, gboolean reuse_free_list)
{
- g_assert (sgen_gray_object_queue_is_empty (queue));
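+ /* Start from a clean slate: first/last, cursor, free list and section count all zeroed. */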
+ memset (queue, 0, sizeof (SgenGrayQueue));
- queue->alloc_prepare_func = NULL;
- queue->alloc_prepare_data = NULL;
#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
queue->enqueue_check_func = enqueue_check_func;
#endif
- /* Free the extra sections allocated during the last collection */
- sgen_gray_object_queue_trim_free_list (queue);
-}
+ mono_os_mutex_init (&queue->steal_mutex);
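+ /* The steal mutex serializes the owner and the stealers when the queue runs low; see sgen_gray_object_steal_section (). */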
-static void
-invalid_prepare_func (SgenGrayQueue *queue)
-{
- g_assert_not_reached ();
+ if (reuse_free_list) {
+ queue->free_list = last_gray_queue_free_list;
+ last_gray_queue_free_list = NULL;
+ }
}
void
-sgen_gray_object_queue_init_invalid (SgenGrayQueue *queue)
+sgen_gray_object_queue_dispose (SgenGrayQueue *queue)
{
- sgen_gray_object_queue_init (queue, NULL);
- queue->alloc_prepare_func = invalid_prepare_func;
- queue->alloc_prepare_data = NULL;
-}
+ SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (queue), "Why are we disposing of a gray queue that's not empty?");
-void
-sgen_gray_queue_set_alloc_prepare (SgenGrayQueue *queue, GrayQueueAllocPrepareFunc alloc_prepare_func, void *data)
-{
- SGEN_ASSERT (0, !queue->alloc_prepare_func && !queue->alloc_prepare_data, "Can't set gray queue alloc-prepare twice");
- queue->alloc_prepare_func = alloc_prepare_func;
- queue->alloc_prepare_data = data;
-}
+ /* Free the extra sections allocated during the last collection */
+ sgen_gray_object_queue_trim_free_list (queue);
-void
-sgen_gray_object_queue_init_with_alloc_prepare (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func,
- GrayQueueAllocPrepareFunc alloc_prepare_func, void *data)
-{
- sgen_gray_object_queue_init (queue, enqueue_check_func);
- sgen_gray_queue_set_alloc_prepare (queue, alloc_prepare_func, data);
+ SGEN_ASSERT (0, !last_gray_queue_free_list, "Are we disposing of two gray queues, one after the other?");
+ last_gray_queue_free_list = queue->free_list;
+
+ /* just to make sure */
+ memset (queue, 0, sizeof (SgenGrayQueue));
}
void
}
}
-void
-sgen_gray_object_queue_disable_alloc_prepare (SgenGrayQueue *queue)
-{
- queue->alloc_prepare_func = NULL;
- queue->alloc_prepare_data = NULL;
-}
-
static void
lock_section_queue (SgenSectionGrayQueue *queue)
{
if (!queue->locked)
return;
- mono_mutex_lock (&queue->lock);
+ mono_os_mutex_lock (&queue->lock);
}
static void
if (!queue->locked)
return;
- mono_mutex_unlock (&queue->lock);
+ mono_os_mutex_unlock (&queue->lock);
}
void
queue->locked = locked;
if (locked) {
- mono_mutex_init_recursive (&queue->lock);
+ mono_os_mutex_init_recursive (&queue->lock);
}
#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE