*
* (C) Copyright 2011 Novell, Inc
*
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
/*
#include <stdlib.h>
#include <mono/utils/atomic.h>
+#ifdef SGEN_WITHOUT_MONO
+#include <mono/sgen/sgen-gc.h>
+#include <mono/sgen/sgen-client.h>
+#else
#include <mono/utils/mono-mmap.h>
+#endif
#include <mono/utils/mono-membar.h>
#include <mono/utils/hazard-pointer.h>
#include <mono/utils/lock-free-queue.h>
MonoLockFreeAllocator *heap;
volatile Anchor anchor;
unsigned int slot_size;
+ unsigned int block_size;
unsigned int max_count;
gpointer sb;
#ifndef DESC_AVAIL_DUMMY
#define NUM_DESC_BATCH 64
-#define SB_SIZE 16384
-#define SB_HEADER_SIZE 16
-#define SB_USABLE_SIZE (SB_SIZE - SB_HEADER_SIZE)
-
-#define SB_HEADER_FOR_ADDR(a) ((gpointer)((size_t)(a) & ~(size_t)(SB_SIZE-1)))
-#define DESCRIPTOR_FOR_ADDR(a) (*(Descriptor**)SB_HEADER_FOR_ADDR (a))
+/*
+ * Round ADDR down to the start of the superblock that contains it.
+ * Replaces the old SB_HEADER_FOR_ADDR macro, taking the (now
+ * per-size-class) block size as a parameter.  The mask trick is valid
+ * only because block_size is asserted to be a power of two in
+ * mono_lock_free_allocator_init_size_class ().
+ */
+static MONO_ALWAYS_INLINE gpointer
+sb_header_for_addr (gpointer addr, size_t block_size)
+{
+	return (gpointer)(((size_t)addr) & (~(block_size - 1)));
+}
/* Taken from SGen */
return prot_flags | MONO_MMAP_PRIVATE | MONO_MMAP_ANON;
}
-static void*
-mono_sgen_alloc_os_memory (size_t size, int activate)
+/*
+ * Allocate the superblock for DESC and link the two together.
+ * Returns a pointer just past the superblock header, i.e. the first
+ * usable slot.  The mono_sgen_* OS-memory wrappers below are removed
+ * in favor of calling mono_valloc / mono_vfree directly.
+ */
+static gpointer
+alloc_sb (Descriptor *desc)
{
-	return mono_valloc (0, size, prot_flags_for_activate (activate));
-}
+	static int pagesize = -1;
-static void
-mono_sgen_free_os_memory (void *addr, size_t size)
-{
-	mono_vfree (addr, size);
-}
+	gpointer sb_header;
-/* size must be a power of 2 */
-static void*
-mono_sgen_alloc_os_memory_aligned (size_t size, size_t alignment, gboolean activate)
-{
-	return mono_valloc_aligned (size, alignment, prot_flags_for_activate (activate));
-}
+	if (pagesize == -1)
+		pagesize = mono_pagesize ();
-static gpointer
-alloc_sb (Descriptor *desc)
-{
-	gpointer sb_header = mono_sgen_alloc_os_memory_aligned (SB_SIZE, SB_SIZE, TRUE);
-	g_assert (sb_header == SB_HEADER_FOR_ADDR (sb_header));
-	DESCRIPTOR_FOR_ADDR (sb_header) = desc;
+	/*
+	 * A page-sized block is naturally page-aligned, so the plain
+	 * (cheaper) mono_valloc suffices; larger blocks need an
+	 * explicitly aligned mapping so sb_header_for_addr () can
+	 * recover the header by masking.
+	 */
+	sb_header = desc->block_size == pagesize ?
+		mono_valloc (NULL, desc->block_size, prot_flags_for_activate (TRUE)) :
+		mono_valloc_aligned (desc->block_size, desc->block_size, prot_flags_for_activate (TRUE));
+
+	g_assert (sb_header == sb_header_for_addr (sb_header, desc->block_size));
+
+	/* The first word of the superblock points back at its descriptor. */
+	*(Descriptor**)sb_header = desc;
	//g_print ("sb %p for %p\n", sb_header, desc);
-	return (char*)sb_header + SB_HEADER_SIZE;
+
+	return (char*)sb_header + LOCK_FREE_ALLOC_SB_HEADER_SIZE;
}
+/*
+ * Release the superblock that SB (the first usable slot, as returned
+ * by alloc_sb ()) belongs to.  BLOCK_SIZE must match the size the
+ * block was allocated with; the caller passes desc->block_size.
+ */
static void
-free_sb (gpointer sb)
+free_sb (gpointer sb, size_t block_size)
{
-	gpointer sb_header = SB_HEADER_FOR_ADDR (sb);
-	g_assert ((char*)sb_header + SB_HEADER_SIZE == sb);
-	mono_sgen_free_os_memory (sb_header, SB_SIZE);
+	gpointer sb_header = sb_header_for_addr (sb, block_size);
+	/* Sanity check: SB must sit exactly one header past the block start. */
+	g_assert ((char*)sb_header + LOCK_FREE_ALLOC_SB_HEADER_SIZE == sb);
+	mono_vfree (sb_header, block_size);
	//g_print ("free sb %p\n", sb_header);
}
for (;;) {
gboolean success;
- desc = get_hazardous_pointer ((gpointer * volatile)&desc_avail, hp, 1);
+ desc = (Descriptor *) get_hazardous_pointer ((gpointer * volatile)&desc_avail, hp, 1);
if (desc) {
Descriptor *next = desc->next;
success = (InterlockedCompareExchangePointer ((gpointer * volatile)&desc_avail, next, desc) == desc);
Descriptor *d;
int i;
- desc = mono_sgen_alloc_os_memory (desc_size * NUM_DESC_BATCH, TRUE);
+ desc = (Descriptor *) mono_valloc (NULL, desc_size * NUM_DESC_BATCH, prot_flags_for_activate (TRUE));
/* Organize into linked list. */
d = desc;
success = (InterlockedCompareExchangePointer ((gpointer * volatile)&desc_avail, desc->next, NULL) == NULL);
if (!success)
- mono_sgen_free_os_memory (desc, desc_size * NUM_DESC_BATCH);
+ mono_vfree (desc, desc_size * NUM_DESC_BATCH);
}
mono_hazard_pointer_clear (hp, 1);
static void
desc_enqueue_avail (gpointer _desc)
{
- Descriptor *desc = _desc;
+ Descriptor *desc = (Descriptor *) _desc;
Descriptor *old_head;
g_assert (desc->anchor.data.state == STATE_EMPTY);
g_assert (desc->anchor.data.state == STATE_EMPTY);
g_assert (desc->in_use);
desc->in_use = FALSE;
- free_sb (desc->sb);
- mono_thread_hazardous_free_or_queue (desc, desc_enqueue_avail, FALSE, TRUE);
+ free_sb (desc->sb, desc->block_size);
+ mono_thread_hazardous_try_free (desc, desc_enqueue_avail);
}
#else
MonoLockFreeQueue available_descs;
+/*
+ * DESC_AVAIL_DUMMY variant: free the descriptor's superblock and park
+ * the descriptor on the lock-free queue of available descriptors.
+ * free_sb () now needs the block size, which lives on the descriptor.
+ */
static void
desc_retire (Descriptor *desc)
{
-	free_sb (desc->sb);
+	free_sb (desc->sb, desc->block_size);
	mono_lock_free_queue_enqueue (&available_descs, &desc->node);
}
#endif
static void
desc_put_partial (gpointer _desc)
{
- Descriptor *desc = _desc;
+ Descriptor *desc = (Descriptor *) _desc;
g_assert (desc->anchor.data.state != STATE_FULL);
list_put_partial (Descriptor *desc)
{
g_assert (desc->anchor.data.state != STATE_FULL);
- mono_thread_hazardous_free_or_queue (desc, desc_put_partial, FALSE, TRUE);
+ mono_thread_hazardous_try_free (desc, desc_put_partial);
}
static void
desc_retire (desc);
} else {
g_assert (desc->heap->sc == sc);
- mono_thread_hazardous_free_or_queue (desc, desc_put_partial, FALSE, TRUE);
+ mono_thread_hazardous_try_free (desc, desc_put_partial);
if (++num_non_empty >= 2)
return;
}
do {
unsigned int next;
-
new_anchor = old_anchor = *(volatile Anchor*)&desc->anchor.value;
if (old_anchor.data.state == STATE_EMPTY) {
/* We must free it because we own it. */
mono_memory_read_barrier ();
next = *(unsigned int*)addr;
- g_assert (next < SB_USABLE_SIZE / desc->slot_size);
+ g_assert (next < LOCK_FREE_ALLOC_SB_USABLE_SIZE (desc->block_size) / desc->slot_size);
new_anchor.data.avail = next;
--new_anchor.data.count;
static gpointer
alloc_from_new_sb (MonoLockFreeAllocator *heap)
{
- unsigned int slot_size, count, i;
+ unsigned int slot_size, block_size, count, i;
Descriptor *desc = desc_alloc ();
- desc->sb = alloc_sb (desc);
-
slot_size = desc->slot_size = heap->sc->slot_size;
- count = SB_USABLE_SIZE / slot_size;
-
- /* Organize blocks into linked list. */
- for (i = 1; i < count - 1; ++i)
- *(unsigned int*)((char*)desc->sb + i * slot_size) = i + 1;
+ block_size = desc->block_size = heap->sc->block_size;
+ count = LOCK_FREE_ALLOC_SB_USABLE_SIZE (block_size) / slot_size;
desc->heap = heap;
/*
desc->anchor.data.count = desc->max_count - 1;
desc->anchor.data.state = STATE_PARTIAL;
+ desc->sb = alloc_sb (desc);
+
+ /* Organize blocks into linked list. */
+ for (i = 1; i < count - 1; ++i)
+ *(unsigned int*)((char*)desc->sb + i * slot_size) = i + 1;
+
mono_memory_write_barrier ();
/* Make it active or free it again. */
}
void
-mono_lock_free_free (gpointer ptr)
+mono_lock_free_free (gpointer ptr, size_t block_size)
{
Anchor old_anchor, new_anchor;
Descriptor *desc;
gpointer sb;
MonoLockFreeAllocator *heap = NULL;
- desc = DESCRIPTOR_FOR_ADDR (ptr);
+ desc = *(Descriptor**) sb_header_for_addr (ptr, block_size);
+ g_assert (block_size == desc->block_size);
+
sb = desc->sb;
- g_assert (SB_HEADER_FOR_ADDR (ptr) == SB_HEADER_FOR_ADDR (sb));
do {
new_anchor = old_anchor = *(volatile Anchor*)&desc->anchor.value;
*(unsigned int*)ptr = old_anchor.data.avail;
new_anchor.data.avail = ((char*)ptr - (char*)sb) / desc->slot_size;
- g_assert (new_anchor.data.avail < SB_USABLE_SIZE / desc->slot_size);
+ g_assert (new_anchor.data.avail < LOCK_FREE_ALLOC_SB_USABLE_SIZE (block_size) / desc->slot_size);
if (old_anchor.data.state == STATE_FULL)
new_anchor.data.state = STATE_PARTIAL;
descriptor_check_consistency (Descriptor *desc, gboolean print)
{
int count = desc->anchor.data.count;
- int max_count = SB_USABLE_SIZE / desc->slot_size;
+ int max_count = LOCK_FREE_ALLOC_SB_USABLE_SIZE (desc->block_size) / desc->slot_size;
#if _MSC_VER
gboolean* linked = alloca(max_count*sizeof(gboolean));
#else
}
+/*
+ * Initialize a size class.  BLOCK_SIZE is new: each size class now
+ * carries its own superblock size instead of the former global
+ * SB_SIZE.  It must be a non-zero power of two (sb_header_for_addr ()
+ * relies on that), and large enough for at least two slots.
+ */
void
-mono_lock_free_allocator_init_size_class (MonoLockFreeAllocSizeClass *sc, unsigned int slot_size)
+mono_lock_free_allocator_init_size_class (MonoLockFreeAllocSizeClass *sc, unsigned int slot_size, unsigned int block_size)
{
-	g_assert (slot_size <= SB_USABLE_SIZE / 2);
+	g_assert (block_size > 0);
+	g_assert ((block_size & (block_size - 1)) == 0); /* check if power of 2 */
+	g_assert (slot_size * 2 <= LOCK_FREE_ALLOC_SB_USABLE_SIZE (block_size));
	mono_lock_free_queue_init (&sc->partial);
	sc->slot_size = slot_size;
+	sc->block_size = block_size;
}
void