diff --git a/mono/utils/lock-free-alloc.c b/mono/utils/lock-free-alloc.c
index 1e5cad6498e..66cb6ea3f80 100644
--- a/mono/utils/lock-free-alloc.c
+++ b/mono/utils/lock-free-alloc.c
@@ -83,7 +83,12 @@
 #include 
 #include 
+#ifdef SGEN_WITHOUT_MONO
+#include 
+#include 
+#else
 #include 
+#endif
 #include 
 #include 
 #include 
@@ -113,6 +118,7 @@ struct _MonoLockFreeAllocDescriptor {
 	MonoLockFreeAllocator *heap;
 	volatile Anchor anchor;
 	unsigned int slot_size;
+	unsigned int block_size;
 	unsigned int max_count;
 	gpointer sb;
 #ifndef DESC_AVAIL_DUMMY
@@ -123,12 +129,11 @@
 
 #define NUM_DESC_BATCH	64
 
-#define SB_SIZE		16384
-#define SB_HEADER_SIZE	16
-#define SB_USABLE_SIZE	(SB_SIZE - SB_HEADER_SIZE)
-
-#define SB_HEADER_FOR_ADDR(a)	((gpointer)((gulong)(a) & ~(gulong)(SB_SIZE-1)))
-#define DESCRIPTOR_FOR_ADDR(a)	(*(Descriptor**)SB_HEADER_FOR_ADDR (a))
+static MONO_ALWAYS_INLINE gpointer
+sb_header_for_addr (gpointer addr, size_t block_size)
+{
+	return (gpointer)(((size_t)addr) & (~(block_size - 1)));
+}
 
 /* Taken from SGen */
 
@@ -139,41 +144,34 @@ prot_flags_for_activate (int activate)
 	return prot_flags | MONO_MMAP_PRIVATE | MONO_MMAP_ANON;
 }
 
-static void*
-mono_sgen_alloc_os_memory (size_t size, int activate)
+static gpointer
+alloc_sb (Descriptor *desc)
 {
-	return mono_valloc (0, size, prot_flags_for_activate (activate));
-}
+	static int pagesize = -1;
 
-static void
-mono_sgen_free_os_memory (void *addr, size_t size)
-{
-	mono_vfree (addr, size);
-}
+	gpointer sb_header;
 
-/* size must be a power of 2 */
-static void*
-mono_sgen_alloc_os_memory_aligned (size_t size, size_t alignment, gboolean activate)
-{
-	return mono_valloc_aligned (size, alignment, prot_flags_for_activate (activate));
-}
+	if (pagesize == -1)
+		pagesize = mono_pagesize ();
 
-static gpointer
-alloc_sb (Descriptor *desc)
-{
-	gpointer sb_header = mono_sgen_alloc_os_memory_aligned (SB_SIZE, SB_SIZE, TRUE);
-	g_assert (sb_header == SB_HEADER_FOR_ADDR (sb_header));
-	DESCRIPTOR_FOR_ADDR (sb_header) = desc;
+	sb_header = desc->block_size == pagesize ?
+		mono_valloc (NULL, desc->block_size, prot_flags_for_activate (TRUE)) :
+		mono_valloc_aligned (desc->block_size, desc->block_size, prot_flags_for_activate (TRUE));
+
+	g_assert (sb_header == sb_header_for_addr (sb_header, desc->block_size));
+
+	*(Descriptor**)sb_header = desc;
 	//g_print ("sb %p for %p\n", sb_header, desc);
-	return (char*)sb_header + SB_HEADER_SIZE;
+
+	return (char*)sb_header + LOCK_FREE_ALLOC_SB_HEADER_SIZE;
 }
 
 static void
-free_sb (gpointer sb)
+free_sb (gpointer sb, size_t block_size)
 {
-	gpointer sb_header = SB_HEADER_FOR_ADDR (sb);
-	g_assert ((char*)sb_header + SB_HEADER_SIZE == sb);
-	mono_sgen_free_os_memory (sb_header, SB_SIZE);
+	gpointer sb_header = sb_header_for_addr (sb, block_size);
+	g_assert ((char*)sb_header + LOCK_FREE_ALLOC_SB_HEADER_SIZE == sb);
+	mono_vfree (sb_header, block_size);
 	//g_print ("free sb %p\n", sb_header);
 }
 
@@ -189,7 +187,7 @@ desc_alloc (void)
 	for (;;) {
 		gboolean success;
 
-		desc = get_hazardous_pointer ((gpointer * volatile)&desc_avail, hp, 1);
+		desc = (Descriptor *) get_hazardous_pointer ((gpointer * volatile)&desc_avail, hp, 1);
 		if (desc) {
 			Descriptor *next = desc->next;
 			success = (InterlockedCompareExchangePointer ((gpointer * volatile)&desc_avail, next, desc) == desc);
@@ -198,7 +196,7 @@ desc_alloc (void)
 			Descriptor *d;
 			int i;
 
-			desc = mono_sgen_alloc_os_memory (desc_size * NUM_DESC_BATCH, TRUE);
+			desc = (Descriptor *) mono_valloc (NULL, desc_size * NUM_DESC_BATCH, prot_flags_for_activate (TRUE));
 
 			/* Organize into linked list. */
 			d = desc;
@@ -214,7 +212,7 @@ desc_alloc (void)
 			success = (InterlockedCompareExchangePointer ((gpointer * volatile)&desc_avail, desc->next, NULL) == NULL);
 
 			if (!success)
-				mono_sgen_free_os_memory (desc, desc_size * NUM_DESC_BATCH);
+				mono_vfree (desc, desc_size * NUM_DESC_BATCH);
 		}
 
 		mono_hazard_pointer_clear (hp, 1);
@@ -232,7 +230,7 @@ desc_alloc (void)
 static void
 desc_enqueue_avail (gpointer _desc)
 {
-	Descriptor *desc = _desc;
+	Descriptor *desc = (Descriptor *) _desc;
 	Descriptor *old_head;
 
 	g_assert (desc->anchor.data.state == STATE_EMPTY);
@@ -251,8 +249,8 @@ desc_retire (Descriptor *desc)
 	g_assert (desc->anchor.data.state == STATE_EMPTY);
 	g_assert (desc->in_use);
 	desc->in_use = FALSE;
-	free_sb (desc->sb);
-	mono_thread_hazardous_free_or_queue (desc, desc_enqueue_avail, FALSE, TRUE);
+	free_sb (desc->sb, desc->block_size);
+	mono_thread_hazardous_free_or_queue (desc, desc_enqueue_avail, HAZARD_FREE_NO_LOCK, HAZARD_FREE_ASYNC_CTX);
 }
 #else
 MonoLockFreeQueue available_descs;
@@ -271,7 +269,7 @@ desc_alloc (void)
 static void
 desc_retire (Descriptor *desc)
 {
-	free_sb (desc->sb);
+	free_sb (desc->sb, desc->block_size);
 	mono_lock_free_queue_enqueue (&available_descs, &desc->node);
 }
 #endif
@@ -292,7 +290,7 @@ list_get_partial (MonoLockFreeAllocSizeClass *sc)
 static void
 desc_put_partial (gpointer _desc)
 {
-	Descriptor *desc = _desc;
+	Descriptor *desc = (Descriptor *) _desc;
 
 	g_assert (desc->anchor.data.state != STATE_FULL);
 
@@ -304,7 +302,7 @@ static void
 list_put_partial (Descriptor *desc)
 {
 	g_assert (desc->anchor.data.state != STATE_FULL);
-	mono_thread_hazardous_free_or_queue (desc, desc_put_partial, FALSE, TRUE);
+	mono_thread_hazardous_free_or_queue (desc, desc_put_partial, HAZARD_FREE_NO_LOCK, HAZARD_FREE_ASYNC_CTX);
 }
 
 static void
@@ -323,7 +321,7 @@ list_remove_empty_desc (MonoLockFreeAllocSizeClass *sc)
 			desc_retire (desc);
 		} else {
 			g_assert (desc->heap->sc == sc);
-			mono_thread_hazardous_free_or_queue (desc, desc_put_partial, FALSE, TRUE);
+			mono_thread_hazardous_free_or_queue (desc, desc_put_partial, HAZARD_FREE_NO_LOCK, HAZARD_FREE_ASYNC_CTX);
 			if (++num_non_empty >= 2)
 				return;
 		}
 	}
@@ -373,7 +371,6 @@ alloc_from_active_or_partial (MonoLockFreeAllocator *heap)
 	do {
 		unsigned int next;
 
-
 		new_anchor = old_anchor = *(volatile Anchor*)&desc->anchor.value;
 		if (old_anchor.data.state == STATE_EMPTY) {
 			/* We must free it because we own it. */
@@ -388,7 +385,7 @@ alloc_from_active_or_partial (MonoLockFreeAllocator *heap)
 			mono_memory_read_barrier ();
 
 			next = *(unsigned int*)addr;
-			g_assert (next < SB_USABLE_SIZE / desc->slot_size);
+			g_assert (next < LOCK_FREE_ALLOC_SB_USABLE_SIZE (desc->block_size) / desc->slot_size);
 
 			new_anchor.data.avail = next;
 			--new_anchor.data.count;
@@ -409,17 +406,12 @@ alloc_from_active_or_partial (MonoLockFreeAllocator *heap)
 static gpointer
 alloc_from_new_sb (MonoLockFreeAllocator *heap)
 {
-	unsigned int slot_size, count, i;
+	unsigned int slot_size, block_size, count, i;
 	Descriptor *desc = desc_alloc ();
 
-	desc->sb = alloc_sb (desc);
-
 	slot_size = desc->slot_size = heap->sc->slot_size;
-	count = SB_USABLE_SIZE / slot_size;
-
-	/* Organize blocks into linked list. */
-	for (i = 1; i < count - 1; ++i)
-		*(unsigned int*)((char*)desc->sb + i * slot_size) = i + 1;
+	block_size = desc->block_size = heap->sc->block_size;
+	count = LOCK_FREE_ALLOC_SB_USABLE_SIZE (block_size) / slot_size;
 
 	desc->heap = heap;
 	/*
@@ -433,6 +425,12 @@ alloc_from_new_sb (MonoLockFreeAllocator *heap)
 	desc->anchor.data.count = desc->max_count - 1;
 	desc->anchor.data.state = STATE_PARTIAL;
 
+	desc->sb = alloc_sb (desc);
+
+	/* Organize blocks into linked list. */
+	for (i = 1; i < count - 1; ++i)
+		*(unsigned int*)((char*)desc->sb + i * slot_size) = i + 1;
+
 	mono_memory_write_barrier ();
 
 	/* Make it active or free it again. */
@@ -465,22 +463,23 @@ mono_lock_free_alloc (MonoLockFreeAllocator *heap)
 }
 
 void
-mono_lock_free_free (gpointer ptr)
+mono_lock_free_free (gpointer ptr, size_t block_size)
 {
 	Anchor old_anchor, new_anchor;
 	Descriptor *desc;
 	gpointer sb;
 	MonoLockFreeAllocator *heap = NULL;
 
-	desc = DESCRIPTOR_FOR_ADDR (ptr);
+	desc = *(Descriptor**) sb_header_for_addr (ptr, block_size);
+	g_assert (block_size == desc->block_size);
+
 	sb = desc->sb;
-	g_assert (SB_HEADER_FOR_ADDR (ptr) == SB_HEADER_FOR_ADDR (sb));
 
 	do {
 		new_anchor = old_anchor = *(volatile Anchor*)&desc->anchor.value;
 		*(unsigned int*)ptr = old_anchor.data.avail;
 		new_anchor.data.avail = ((char*)ptr - (char*)sb) / desc->slot_size;
-		g_assert (new_anchor.data.avail < SB_USABLE_SIZE / desc->slot_size);
+		g_assert (new_anchor.data.avail < LOCK_FREE_ALLOC_SB_USABLE_SIZE (block_size) / desc->slot_size);
 
 		if (old_anchor.data.state == STATE_FULL)
 			new_anchor.data.state = STATE_PARTIAL;
@@ -530,7 +529,7 @@ static void
 descriptor_check_consistency (Descriptor *desc, gboolean print)
 {
 	int count = desc->anchor.data.count;
-	int max_count = SB_USABLE_SIZE / desc->slot_size;
+	int max_count = LOCK_FREE_ALLOC_SB_USABLE_SIZE (desc->block_size) / desc->slot_size;
 #if _MSC_VER
 	gboolean* linked = alloca(max_count*sizeof(gboolean));
 #else
@@ -607,12 +606,15 @@ mono_lock_free_allocator_check_consistency (MonoLockFreeAllocator *heap)
 }
 
 void
-mono_lock_free_allocator_init_size_class (MonoLockFreeAllocSizeClass *sc, unsigned int slot_size)
+mono_lock_free_allocator_init_size_class (MonoLockFreeAllocSizeClass *sc, unsigned int slot_size, unsigned int block_size)
 {
-	g_assert (slot_size <= SB_USABLE_SIZE / 2);
+	g_assert (block_size > 0);
+	g_assert ((block_size & (block_size - 1)) == 0); /* check if power of 2 */
+	g_assert (slot_size * 2 <= LOCK_FREE_ALLOC_SB_USABLE_SIZE (block_size));
 
 	mono_lock_free_queue_init (&sc->partial);
 
 	sc->slot_size = slot_size;
+	sc->block_size = block_size;
 }
 
 void
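
Usage note: this commit turns the fixed 16 KiB superblock (the old SB_SIZE) into a per-size-class block_size, so callers now pass the block size once when initializing a size class and again on every free. Below is a minimal caller sketch; it is illustrative only, and the two-argument mono_lock_free_allocator_init_allocator () it uses is assumed from mono/utils/lock-free-alloc.h at this revision (the heap initializer does not appear in this diff).

#include <glib.h>
#include <mono/utils/lock-free-alloc.h>

/* Hypothetical caller, not part of the patch: one size class of
   128-byte slots carved out of 16 KiB superblocks. */
#define MY_SLOT_SIZE	128
#define MY_BLOCK_SIZE	16384	/* must be a power of 2 */

static MonoLockFreeAllocSizeClass size_class;
static MonoLockFreeAllocator heap;

static void
my_allocator_init (void)
{
	/* init_size_class () now asserts that block_size is nonzero, a
	   power of 2, and big enough for at least two slots after the
	   descriptor-pointer header. */
	mono_lock_free_allocator_init_size_class (&size_class, MY_SLOT_SIZE, MY_BLOCK_SIZE);
	/* Assumed two-argument form; check lock-free-alloc.h at this
	   revision. */
	mono_lock_free_allocator_init_allocator (&heap, &size_class);
}

static void
my_use (void)
{
	gpointer p = mono_lock_free_alloc (&heap);
	/* ... use the MY_SLOT_SIZE-byte slot ... */
	/* The free path recovers the superblock header by masking the
	   pointer with ~(block_size - 1), so it must be passed the same
	   block_size the size class was created with. */
	mono_lock_free_free (p, MY_BLOCK_SIZE);
}

The power-of-two requirement comes from sb_header_for_addr (): the descriptor pointer is found by masking the slot address down to block_size alignment. Page-sized blocks can come straight from mono_valloc (), which returns page-aligned memory; any larger block must come from mono_valloc_aligned () so that the mask is valid.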