/*
 * lock-free-alloc.c: Lock free allocator.
 *
 * (C) Copyright 2011 Novell, Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * This is a simplified version of the lock-free allocator described in
 *
 *   Scalable Lock-Free Dynamic Memory Allocation
 *   Maged M. Michael, PLDI 2004
 *
 * I could not get Michael's allocator working bug-free under heavy
 * stress tests.  The paper doesn't provide a correctness proof, and
 * after failing to formalize the ownership of descriptors I devised
 * this simplified algorithm.
 *
 * Allocation within superblocks proceeds exactly like in Michael's
 * allocator.  The simplification is that a thread has to "acquire" a
 * descriptor before it can allocate from its superblock.  While it owns
 * the descriptor no other thread can acquire it, and hence no other
 * thread can allocate from it.  A consequence of this is that the ABA
 * problem cannot occur, so we don't need the tag field and don't have
 * to use 64 bit CAS.
 *
 * Descriptors are stored in two locations: the partial queue and the
 * active field.  They can be in at most one of those at any one time.
 * If a thread wants to allocate, it needs to get a descriptor.  It
 * tries the active descriptor first, CASing it to NULL.  If that
 * doesn't work, it gets a descriptor out of the partial queue.  Once it
 * has the descriptor it owns it, because it is not referenced anymore.
 * It allocates a slot and then gives the descriptor back (unless it is
 * FULL).
 *
 * Note that it is still possible that a slot is freed while an
 * allocation is in progress from the same superblock.  Ownership in
 * this case is not complicated, though.  If the block was FULL and the
 * free set it to PARTIAL, the free now owns the block (because FULL
 * blocks are not referenced from partial and active) and has to give it
 * back.  If the block was PARTIAL then the free doesn't own the block
 * (because it's either still referenced, or an alloc owns it).  A
 * special case of this is that it has changed from PARTIAL to EMPTY and
 * now needs to be retired.  Technically, the free wouldn't have to do
 * anything in this case because the first thing an alloc does when it
 * gets ownership of a descriptor is to check whether it is EMPTY and
 * retire it if that is the case.  As an optimization, our free does try
 * to acquire the descriptor (by CASing the active field, which, if it
 * is lucky, points to that descriptor) and, if it can do so, retire it.
 * If it can't, it tries to retire other descriptors from the partial
 * queue, so that we can be sure that even if no more allocations
 * happen, descriptors are still retired.  This is analogous to what
 * Michael's allocator does.
 *
 * Another difference from Michael's allocator is not related to
 * concurrency, however: we don't point from slots to descriptors.
 * Instead we allocate superblocks aligned and point from the start of
 * the superblock to the descriptor, so we only need one word of
 * metadata per superblock (see the illustrative sketch below).
 *
 * FIXME: Having more than one allocator per size class is probably
 * buggy because it was never tested.
 */
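/*
 * Illustrative sketch, not part of the original file: how the two core
 * ideas above combine.  Descriptor and MonoLockFreeAllocator are the
 * types defined further down; the functions are exposition only, hence
 * wrapped in #if 0.
 */
#if 0
/* The one word of metadata: mask a slot address down to its power-of-two
 * aligned superblock, whose first word is the Descriptor pointer. */
static Descriptor*
example_descriptor_for_slot (gpointer slot, size_t block_size)
{
	return *(Descriptor**)(((size_t)slot) & ~(block_size - 1));
}

/* Acquiring the active descriptor: CAS it to NULL.  On success the
 * caller owns it exclusively, so no other thread can allocate from it
 * and no ABA tag is needed; on failure the caller falls back to the
 * partial queue (see list_get_partial below). */
static Descriptor*
example_try_acquire_active (MonoLockFreeAllocator *heap)
{
	Descriptor *desc = heap->active;
	if (desc && InterlockedCompareExchangePointer ((gpointer * volatile)&heap->active, NULL, desc) == desc)
		return desc;
	return NULL;
}
#endif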
#include <glib.h>
#include <stdlib.h>

#include <mono/utils/atomic.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-membar.h>
#include <mono/utils/hazard-pointer.h>
#include <mono/utils/lock-free-queue.h>

#include <mono/utils/lock-free-alloc.h>

//#define DESC_AVAIL_DUMMY
enum {
	STATE_FULL,
	STATE_PARTIAL,
	STATE_EMPTY
};

/* The anchor packs the whole superblock state into one word so that it
 * can be updated with a single 32 bit CAS (see set_anchor below). */
typedef union {
	gint32 value;
	struct {
		guint32 avail : 15;	/* index of the first available slot */
		guint32 count : 15;	/* number of available slots */
		guint32 state : 2;	/* FULL, PARTIAL or EMPTY */
	} data;
} Anchor;

typedef struct _MonoLockFreeAllocDescriptor Descriptor;
struct _MonoLockFreeAllocDescriptor {
	MonoLockFreeQueueNode node;
	MonoLockFreeAllocator *heap;
	volatile Anchor anchor;
	unsigned int slot_size;
	unsigned int block_size;
	unsigned int max_count;
	gpointer sb;
#ifndef DESC_AVAIL_DUMMY
	Descriptor * volatile next;
#endif
	gboolean in_use;	/* used for debugging only */
};

#define NUM_DESC_BATCH	64
static MONO_ALWAYS_INLINE gpointer
sb_header_for_addr (gpointer addr, size_t block_size)
{
	return (gpointer)(((size_t)addr) & (~(block_size - 1)));
}
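/* Worked example (illustrative, not part of the original file): with a
 * 16 KB superblock, block_size - 1 == 0x3fff, so masking clears the low
 * 14 bits and yields the containing 16 KB boundary. */
#if 0
g_assert (sb_header_for_addr ((gpointer)0x7f0000004321, 0x4000) == (gpointer)0x7f0000004000);
#endif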
/* Taken from SGen */

static unsigned long
prot_flags_for_activate (int activate)
{
	unsigned long prot_flags = activate ? MONO_MMAP_READ|MONO_MMAP_WRITE : MONO_MMAP_NONE;
	return prot_flags | MONO_MMAP_PRIVATE | MONO_MMAP_ANON;
}
static gpointer
alloc_sb (Descriptor *desc)
{
	static int pagesize = -1;

	gpointer sb_header;

	if (pagesize == -1)
		pagesize = mono_pagesize ();

	/* A page-sized superblock is automatically page-aligned; larger
	 * ones must be allocated aligned so that the header can be found
	 * by masking (see sb_header_for_addr). */
	sb_header = desc->block_size == pagesize ?
		mono_valloc (0, desc->block_size, prot_flags_for_activate (TRUE)) :
		mono_valloc_aligned (desc->block_size, desc->block_size, prot_flags_for_activate (TRUE));

	g_assert (sb_header == sb_header_for_addr (sb_header, desc->block_size));

	/* The single word of per-superblock metadata. */
	*(Descriptor**)sb_header = desc;
	//g_print ("sb %p for %p\n", sb_header, desc);

	return (char*)sb_header + LOCK_FREE_ALLOC_SB_HEADER_SIZE;
}
static void
free_sb (gpointer sb, size_t block_size)
{
	gpointer sb_header = sb_header_for_addr (sb, block_size);
	g_assert ((char*)sb_header + LOCK_FREE_ALLOC_SB_HEADER_SIZE == sb);
	mono_vfree (sb_header, block_size);
	//g_print ("free sb %p\n", sb_header);
}
#ifndef DESC_AVAIL_DUMMY
static Descriptor * volatile desc_avail;

static Descriptor*
desc_alloc (void)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	Descriptor *desc;

	for (;;) {
		gboolean success;

		/* The hazard pointer makes the desc->next read below safe
		 * even if another thread pops and retires desc concurrently. */
		desc = get_hazardous_pointer ((gpointer * volatile)&desc_avail, hp, 1);
		if (desc) {
			Descriptor *next = desc->next;
			success = (InterlockedCompareExchangePointer ((gpointer * volatile)&desc_avail, next, desc) == desc);
		} else {
			size_t desc_size = sizeof (Descriptor);
			Descriptor *d;
			int i;

			desc = mono_valloc (0, desc_size * NUM_DESC_BATCH, prot_flags_for_activate (TRUE));

			/* Organize into linked list. */
			d = desc;
			for (i = 0; i < NUM_DESC_BATCH; ++i) {
				Descriptor *next = (i == (NUM_DESC_BATCH - 1)) ? NULL : (Descriptor*)((char*)desc + ((i + 1) * desc_size));
				d->next = next;
				mono_lock_free_queue_node_init (&d->node, TRUE);
				d = next;
			}

			mono_memory_write_barrier ();

			/* Push the batch, minus its first descriptor, which we keep. */
			success = (InterlockedCompareExchangePointer ((gpointer * volatile)&desc_avail, desc->next, NULL) == NULL);

			if (!success)
				mono_vfree (desc, desc_size * NUM_DESC_BATCH);
		}

		mono_hazard_pointer_clear (hp, 1);

		if (success)
			break;
	}

	g_assert (!desc->in_use);
	desc->in_use = TRUE;

	return desc;
}
static void
desc_enqueue_avail (gpointer _desc)
{
	Descriptor *desc = _desc;
	Descriptor *old_head;

	g_assert (desc->anchor.data.state == STATE_EMPTY);
	g_assert (!desc->in_use);

	/* Standard lock-free stack push onto the available list. */
	do {
		old_head = desc_avail;
		desc->next = old_head;
		mono_memory_write_barrier ();
	} while (InterlockedCompareExchangePointer ((gpointer * volatile)&desc_avail, desc, old_head) != old_head);
}
static void
desc_retire (Descriptor *desc)
{
	g_assert (desc->anchor.data.state == STATE_EMPTY);
	g_assert (desc->in_use);
	desc->in_use = FALSE;
	free_sb (desc->sb, desc->block_size);
	/* Defer the push until no thread holds a hazardous reference. */
	mono_thread_hazardous_free_or_queue (desc, desc_enqueue_avail, FALSE, TRUE);
}
#else
MonoLockFreeQueue available_descs;

static Descriptor*
desc_alloc (void)
{
	Descriptor *desc = (Descriptor*)mono_lock_free_queue_dequeue (&available_descs);

	if (desc)
		return desc;

	return calloc (1, sizeof (Descriptor));
}

static void
desc_retire (Descriptor *desc)
{
	free_sb (desc->sb, desc->block_size);
	mono_lock_free_queue_enqueue (&available_descs, &desc->node);
}
#endif
static Descriptor*
list_get_partial (MonoLockFreeAllocSizeClass *sc)
{
	for (;;) {
		Descriptor *desc = (Descriptor*) mono_lock_free_queue_dequeue (&sc->partial);
		if (!desc)
			return NULL;
		/* Retire empty descriptors as we encounter them. */
		if (desc->anchor.data.state != STATE_EMPTY)
			return desc;
		desc_retire (desc);
	}
}
static void
desc_put_partial (gpointer _desc)
{
	Descriptor *desc = _desc;

	g_assert (desc->anchor.data.state != STATE_FULL);

	mono_lock_free_queue_node_free (&desc->node);
	mono_lock_free_queue_enqueue (&desc->heap->sc->partial, &desc->node);
}
static void
list_put_partial (Descriptor *desc)
{
	g_assert (desc->anchor.data.state != STATE_FULL);
	mono_thread_hazardous_free_or_queue (desc, desc_put_partial, FALSE, TRUE);
}
static void
list_remove_empty_desc (MonoLockFreeAllocSizeClass *sc)
{
	int num_non_empty = 0;
	for (;;) {
		Descriptor *desc = (Descriptor*) mono_lock_free_queue_dequeue (&sc->partial);
		if (!desc)
			return;
		/*
		 * We don't need to read atomically because we're the
		 * only thread that references this descriptor.
		 */
		if (desc->anchor.data.state == STATE_EMPTY) {
			desc_retire (desc);
		} else {
			g_assert (desc->heap->sc == sc);
			mono_thread_hazardous_free_or_queue (desc, desc_put_partial, FALSE, TRUE);
			/* Bound the work done per call: stop after re-queuing
			 * two non-empty descriptors. */
			if (++num_non_empty >= 2)
				return;
		}
	}
}
static Descriptor*
heap_get_partial (MonoLockFreeAllocator *heap)
{
	return list_get_partial (heap->sc);
}

static void
heap_put_partial (Descriptor *desc)
{
	list_put_partial (desc);
}
static gboolean
set_anchor (Descriptor *desc, Anchor old_anchor, Anchor new_anchor)
{
	/* Once a descriptor is EMPTY it must stay EMPTY until retired. */
	if (old_anchor.data.state == STATE_EMPTY)
		g_assert (new_anchor.data.state == STATE_EMPTY);

	return InterlockedCompareExchange (&desc->anchor.value, new_anchor.value, old_anchor.value) == old_anchor.value;
}
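/*
 * Illustrative sketch, not part of the original file: because Anchor
 * packs avail, count and state into one 32-bit word, a complete state
 * change commits with a single 32-bit CAS via set_anchor.  `desc` here
 * stands for any descriptor the current thread owns.
 */
#if 0
Anchor old_anchor, new_anchor;
do {
	new_anchor = old_anchor = *(volatile Anchor*)&desc->anchor.value;
	new_anchor.data.avail = 2;	/* hypothetical next free slot */
	--new_anchor.data.count;	/* one slot handed out */
	if (new_anchor.data.count == 0)
		new_anchor.data.state = STATE_FULL;	/* transition rides along */
} while (!set_anchor (desc, old_anchor, new_anchor));
#endif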
static gpointer
alloc_from_active_or_partial (MonoLockFreeAllocator *heap)
{
	Descriptor *desc;
	Anchor old_anchor, new_anchor;
	gpointer addr;

 retry:
	desc = heap->active;
	if (desc) {
		if (InterlockedCompareExchangePointer ((gpointer * volatile)&heap->active, NULL, desc) != desc)
			goto retry;
	} else {
		desc = heap_get_partial (heap);
		if (!desc)
			return NULL;
	}

	/* Now we own the desc. */

	do {
		unsigned int next;

		new_anchor = old_anchor = *(volatile Anchor*)&desc->anchor.value;
		if (old_anchor.data.state == STATE_EMPTY) {
			/* We must free it because we own it. */
			desc_retire (desc);
			goto retry;
		}
		g_assert (old_anchor.data.state == STATE_PARTIAL);
		g_assert (old_anchor.data.count > 0);

		addr = (char*)desc->sb + old_anchor.data.avail * desc->slot_size;

		mono_memory_read_barrier ();

		/* The first word of a free slot holds the index of the
		 * next free slot in the superblock. */
		next = *(unsigned int*)addr;
		g_assert (next < LOCK_FREE_ALLOC_SB_USABLE_SIZE (desc->block_size) / desc->slot_size);

		new_anchor.data.avail = next;
		--new_anchor.data.count;

		if (new_anchor.data.count == 0)
			new_anchor.data.state = STATE_FULL;
	} while (!set_anchor (desc, old_anchor, new_anchor));

	/* If the desc is partial we have to give it back. */
	if (new_anchor.data.state == STATE_PARTIAL) {
		if (InterlockedCompareExchangePointer ((gpointer * volatile)&heap->active, desc, NULL) != NULL)
			heap_put_partial (desc);
	}

	return addr;
}
static gpointer
alloc_from_new_sb (MonoLockFreeAllocator *heap)
{
	unsigned int slot_size, block_size, count, i;
	Descriptor *desc = desc_alloc ();

	slot_size = desc->slot_size = heap->sc->slot_size;
	block_size = desc->block_size = heap->sc->block_size;
	count = LOCK_FREE_ALLOC_SB_USABLE_SIZE (block_size) / slot_size;

	desc->heap = heap;
	/*
	 * Setting avail to 1 because 0 is the block we're allocating
	 * right away.
	 */
	desc->anchor.data.avail = 1;
	desc->max_count = count;

	desc->anchor.data.count = desc->max_count - 1;
	desc->anchor.data.state = STATE_PARTIAL;

	desc->sb = alloc_sb (desc);

	/* Organize blocks into linked list.  The last slot's next index is
	 * never written: by the time an alloc reaches it the anchor count
	 * drops to zero and the state becomes FULL, so the (mmap-zeroed)
	 * value is never used. */
	for (i = 1; i < count - 1; ++i)
		*(unsigned int*)((char*)desc->sb + i * slot_size) = i + 1;

	mono_memory_write_barrier ();

	/* Make it active or free it again. */
	if (InterlockedCompareExchangePointer ((gpointer * volatile)&heap->active, desc, NULL) == NULL) {
		return desc->sb;
	} else {
		desc->anchor.data.state = STATE_EMPTY;
		desc_retire (desc);
		return NULL;
	}
}
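/*
 * Worked example (illustrative, not part of the original file): for a
 * hypothetical superblock holding four slots, the setup above hands out
 * slot 0 immediately and leaves avail = 1, count = 3, with slot words
 * 1 -> 2 and 2 -> 3.  Slot 3's word stays mmap-zeroed, which is safe
 * because the anchor goes FULL on the allocation that consumes it.
 */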
gpointer
mono_lock_free_alloc (MonoLockFreeAllocator *heap)
{
	gpointer addr;

	for (;;) {
		addr = alloc_from_active_or_partial (heap);
		if (addr)
			break;

		/* No usable active or partial descriptor: allocate a
		 * fresh superblock and try to install it. */
		addr = alloc_from_new_sb (heap);
		if (addr)
			break;
	}

	return addr;
}
void
mono_lock_free_free (gpointer ptr, size_t block_size)
{
	Anchor old_anchor, new_anchor;
	Descriptor *desc;
	gpointer sb;
	MonoLockFreeAllocator *heap = NULL;

	desc = *(Descriptor**) sb_header_for_addr (ptr, block_size);
	g_assert (block_size == desc->block_size);

	sb = desc->sb;

	do {
		new_anchor = old_anchor = *(volatile Anchor*)&desc->anchor.value;
		/* Thread the freed slot onto the front of the free list. */
		*(unsigned int*)ptr = old_anchor.data.avail;
		new_anchor.data.avail = ((char*)ptr - (char*)sb) / desc->slot_size;
		g_assert (new_anchor.data.avail < LOCK_FREE_ALLOC_SB_USABLE_SIZE (block_size) / desc->slot_size);

		if (old_anchor.data.state == STATE_FULL)
			new_anchor.data.state = STATE_PARTIAL;

		if (++new_anchor.data.count == desc->max_count) {
			heap = desc->heap;
			new_anchor.data.state = STATE_EMPTY;
		}
	} while (!set_anchor (desc, old_anchor, new_anchor));

	if (new_anchor.data.state == STATE_EMPTY) {
		g_assert (old_anchor.data.state != STATE_EMPTY);

		if (InterlockedCompareExchangePointer ((gpointer * volatile)&heap->active, NULL, desc) == desc) {
			/* We own it, so we free it. */
			desc_retire (desc);
		} else {
			/*
			 * Somebody else must free it, so we do some
			 * freeing for others.
			 */
			list_remove_empty_desc (heap->sc);
		}
	} else if (old_anchor.data.state == STATE_FULL) {
		/*
		 * Nobody owned it, now we do, so we need to give it
		 * back.
		 */
		g_assert (new_anchor.data.state == STATE_PARTIAL);

		if (InterlockedCompareExchangePointer ((gpointer * volatile)&desc->heap->active, desc, NULL) != NULL)
			heap_put_partial (desc);
	}
}
#define g_assert_OR_PRINT(c, format, ...)	do {			\
		if (print) {						\
			if (!(c))					\
				g_print ((format), ## __VA_ARGS__);	\
		} else {						\
			g_assert ((c));					\
		}							\
	} while (0)
static void
descriptor_check_consistency (Descriptor *desc, gboolean print)
{
	int count = desc->anchor.data.count;
	int max_count = LOCK_FREE_ALLOC_SB_USABLE_SIZE (desc->block_size) / desc->slot_size;
#ifdef _MSC_VER
	gboolean *linked = alloca (max_count * sizeof (gboolean));
#else
	gboolean linked [max_count];
#endif
	int i, last;
	unsigned int index;

#ifndef DESC_AVAIL_DUMMY
	Descriptor *avail;

	for (avail = desc_avail; avail; avail = avail->next)
		g_assert_OR_PRINT (desc != avail, "descriptor is in the available list\n");
#endif

	g_assert_OR_PRINT (desc->slot_size == desc->heap->sc->slot_size, "slot size doesn't match size class\n");

	if (print)
		g_print ("descriptor %p is ", desc);

	switch (desc->anchor.data.state) {
	case STATE_FULL:
		if (print)
			g_print ("full\n");
		g_assert_OR_PRINT (count == 0, "count is not zero: %d\n", count);
		break;
	case STATE_PARTIAL:
		if (print)
			g_print ("partial\n");
		g_assert_OR_PRINT (count < max_count, "count too high: is %d but must be below %d\n", count, max_count);
		break;
	case STATE_EMPTY:
		if (print)
			g_print ("empty\n");
		g_assert_OR_PRINT (count == max_count, "count is wrong: is %d but should be %d\n", count, max_count);
		break;
	default:
		g_assert_OR_PRINT (FALSE, "invalid state\n");
	}

	for (i = 0; i < max_count; ++i)
		linked [i] = FALSE;

	/* Walk the free list and check that it links exactly `count`
	 * distinct, in-range slots. */
	index = desc->anchor.data.avail;
	last = -1;
	for (i = 0; i < count; ++i) {
		gpointer addr = (char*)desc->sb + index * desc->slot_size;
		g_assert_OR_PRINT (index >= 0 && index < max_count,
				"index %d for %dth available slot, linked from %d, not in range [0 .. %d)\n",
				index, i, last, max_count);
		g_assert_OR_PRINT (!linked [index], "%dth available slot %d linked twice\n", i, index);
		if (linked [index])
			break;
		linked [index] = TRUE;
		last = index;
		index = *(unsigned int*)addr;
	}
}
gboolean
mono_lock_free_allocator_check_consistency (MonoLockFreeAllocator *heap)
{
	Descriptor *active = heap->active;
	Descriptor *desc;

	if (active) {
		g_assert (active->anchor.data.state == STATE_PARTIAL);
		descriptor_check_consistency (active, FALSE);
	}

	while ((desc = (Descriptor*)mono_lock_free_queue_dequeue (&heap->sc->partial))) {
		g_assert (desc->anchor.data.state == STATE_PARTIAL || desc->anchor.data.state == STATE_EMPTY);
		descriptor_check_consistency (desc, FALSE);
	}

	return TRUE;
}
void
mono_lock_free_allocator_init_size_class (MonoLockFreeAllocSizeClass *sc, unsigned int slot_size, unsigned int block_size)
{
	g_assert (block_size > 0);
	g_assert ((block_size & (block_size - 1)) == 0); /* check if power of 2 */
	g_assert (slot_size * 2 <= LOCK_FREE_ALLOC_SB_USABLE_SIZE (block_size));

	mono_lock_free_queue_init (&sc->partial);

	sc->slot_size = slot_size;
	sc->block_size = block_size;
}

void
mono_lock_free_allocator_init_allocator (MonoLockFreeAllocator *heap, MonoLockFreeAllocSizeClass *sc)
{
	heap->sc = sc;
	heap->active = NULL;
}
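/*
 * Usage sketch (illustrative, not part of the original file): one size
 * class shared by one allocator, with hypothetical 128-byte slots in
 * 16 KB superblocks.  Note that every free must pass the same block
 * size its size class was created with, since it is used to mask the
 * pointer back to the superblock header.
 */
#if 0
static MonoLockFreeAllocSizeClass size_class;
static MonoLockFreeAllocator allocator;

static void
example_usage (void)
{
	gpointer p;

	mono_lock_free_allocator_init_size_class (&size_class, 128, 16384);
	mono_lock_free_allocator_init_allocator (&allocator, &size_class);

	p = mono_lock_free_alloc (&allocator);
	/* ... use the 128-byte slot ... */
	mono_lock_free_free (p, 16384);
}
#endif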