Merge pull request #2810 from kumpera/fix_hazard_free
[mono.git] / mono / utils / hazard-pointer.c
index f0cf3e1575d32d7fc9584fc6b1b99dc38d982c49..efe6b649b3039450cbdc176f0373cd55754a69be 100644 (file)
@@ -2,43 +2,65 @@
  * hazard-pointer.c: Hazard pointer related code.
  *
  * (C) Copyright 2011 Novell, Inc
+ * Licensed under the MIT license. See LICENSE file in the project root for full license information.
  */
 
 #include <config.h>
 
-#include <mono/metadata/class-internals.h>
+#include <string.h>
+
 #include <mono/utils/hazard-pointer.h>
 #include <mono/utils/mono-membar.h>
-#include <mono/utils/mono-mmap.h>
+#include <mono/utils/mono-memory-model.h>
 #include <mono/utils/monobitset.h>
+#include <mono/utils/lock-free-array-queue.h>
+#include <mono/utils/atomic.h>
+#include <mono/utils/mono-os-mutex.h>
+#ifdef SGEN_WITHOUT_MONO
+#include <mono/sgen/sgen-gc.h>
+#include <mono/sgen/sgen-client.h>
+#else
+#include <mono/utils/mono-mmap.h>
 #include <mono/utils/mono-threads.h>
+#include <mono/utils/mono-counters.h>
 #include <mono/io-layer/io-layer.h>
+#endif
 
/* One entry in the delayed-free queue: a block that could not be freed
 * immediately because some thread might still advertise it as a hazard
 * pointer. */
typedef struct {
	gpointer p;                      /* the memory block waiting to be freed */
	MonoHazardousFreeFunc free_func; /* function used to free p once it is safe */
	HazardFreeLocking locking;       /* whether free_func may take locks — checked in try_free_delayed_free_item () */
} DelayedFreeItem;

/* The hazard table */
#if MONO_SMALL_CONFIG
#define HAZARD_TABLE_MAX_SIZE	256
#define HAZARD_TABLE_OVERFLOW	4
#else
#define HAZARD_TABLE_MAX_SIZE	16384 /* There cannot be more threads than this number. */
/* Small ids reserved up-front as overflow slots for signal handlers —
   see mono_hazard_pointer_save_for_signal_handler (). */
#define HAZARD_TABLE_OVERFLOW	64
#endif

static volatile int hazard_table_size = 0;
static MonoThreadHazardPointers * volatile hazard_table = NULL;
/* Optional callback notified with the queue size after each enqueue. */
static MonoHazardFreeQueueSizeCallback queue_size_cb;

/*
 * Each entry is either 0 or 1, indicating whether that overflow small
 * ID is busy.
 */
static volatile gint32 overflow_busy [HAZARD_TABLE_OVERFLOW];

/* The table where we keep pointers to blocks to be freed but that
   have to wait because they're guarded by a hazard pointer. */
static MonoLockFreeArrayQueue delayed_free_queue = MONO_LOCK_FREE_ARRAY_QUEUE_INIT (sizeof (DelayedFreeItem));

/* The table for small ID assignment */
static mono_mutex_t small_id_mutex;
static int small_id_next;
static int highest_small_id = -1;
static MonoBitSet *small_id_table;
/* Incremented for every pointer queued for delayed freeing; registered as a
   counter in mono_thread_smr_init (). */
static int hazardous_pointer_count;
 
 /*
  * Allocate a small thread id.
@@ -51,12 +73,12 @@ mono_thread_small_id_alloc (void)
 {
        int i, id = -1;
 
-       EnterCriticalSection (&small_id_mutex);
+       mono_os_mutex_lock (&small_id_mutex);
 
        if (!small_id_table)
                small_id_table = mono_bitset_new (1, 0);
 
-       id = mono_bitset_find_first_unset (small_id_table, small_id_next);
+       id = mono_bitset_find_first_unset (small_id_table, small_id_next - 1);
        if (id == -1)
                id = mono_bitset_find_first_unset (small_id_table, -1);
 
@@ -89,7 +111,7 @@ mono_thread_small_id_alloc (void)
                int num_pages = (hazard_table_size * sizeof (MonoThreadHazardPointers) + pagesize - 1) / pagesize;
 
                if (hazard_table == NULL) {
-                       hazard_table = mono_valloc (NULL,
+                       hazard_table = (MonoThreadHazardPointers *volatile) mono_valloc (NULL,
                                sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE,
                                MONO_MMAP_NONE);
                }
@@ -113,7 +135,7 @@ mono_thread_small_id_alloc (void)
                mono_memory_write_barrier ();
        }
 
-       LeaveCriticalSection (&small_id_mutex);
+       mono_os_mutex_unlock (&small_id_mutex);
 
        return id;
 }
/*
 * Release a small thread id previously handed out by
 * mono_thread_small_id_alloc () so it can be reused.
 */
void
mono_thread_small_id_free (int id)
{
	/* MonoBitSet operations are not atomic. */
	mono_os_mutex_lock (&small_id_mutex);

	g_assert (id >= 0 && id < small_id_table->size);
	g_assert (mono_bitset_test_fast (small_id_table, id));
	mono_bitset_clear_fast (small_id_table, id);

	mono_os_mutex_unlock (&small_id_mutex);
}
 
/*
 * Return TRUE if any registered thread currently advertises @p in one of its
 * hazard pointer slots, i.e. @p must not be freed yet.
 */
static gboolean
is_pointer_hazardous (gpointer p)
{
	int i, j;
	/* Snapshot the highest live small id; entries above it are unused. */
	int highest = highest_small_id;

	g_assert (highest < hazard_table_size);

	for (i = 0; i <= highest; ++i) {
		for (j = 0; j < HAZARD_POINTER_COUNT; ++j) {
			if (hazard_table [i].hazard_pointers [j] == p)
				return TRUE;
			/* NOTE(review): fence between slot loads — presumably to order
			   the reads against concurrent hazard-pointer stores; confirm. */
			LOAD_LOAD_FENCE;
		}
	}

	return FALSE;
}
@@ -151,21 +175,14 @@ is_pointer_hazardous (gpointer p)
 MonoThreadHazardPointers*
 mono_hazard_pointer_get (void)
 {
-       MonoThreadInfo *current_thread = mono_thread_info_current ();
+       int small_id = mono_thread_info_get_small_id ();
 
-       if (!(current_thread && current_thread->small_id >= 0)) {
+       if (small_id < 0) {
                static MonoThreadHazardPointers emerg_hazard_table;
-               g_warning ("Thread %p may have been prematurely finalized", current_thread);
+               g_warning ("Thread %p may have been prematurely finalized", (gpointer) (gsize) mono_native_thread_id_get ());
                return &emerg_hazard_table;
        }
 
-       return &hazard_table [current_thread->small_id];
-}
-
-MonoThreadHazardPointers*
-mono_hazard_pointer_get_by_id (int small_id)
-{
-       g_assert (small_id >= 0 && small_id <= highest_small_id);
        return &hazard_table [small_id];
 }
 
@@ -199,77 +216,183 @@ get_hazardous_pointer (gpointer volatile *pp, MonoThreadHazardPointers *hp, int
        return p;
 }
 
/*
 * Move the current thread's hazard pointers into a reserved "overflow" slot
 * of the hazard table so that a signal handler may reuse the thread's own
 * record while the interrupted code still has live hazard pointers.
 *
 * Returns the claimed overflow slot index (to be passed to
 * mono_hazard_pointer_restore_for_signal_handler ()), or -1 when the thread
 * had no active hazard pointers and nothing needed saving.
 */
int
mono_hazard_pointer_save_for_signal_handler (void)
{
	int small_id, i;
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MonoThreadHazardPointers *hp_overflow;

	/* Fast path: nothing to save if no slot is in use. */
	for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
		if (hp->hazard_pointers [i])
			goto search;
	return -1;

 search:
	/* Look for a free overflow slot. */
	for (small_id = 0; small_id < HAZARD_TABLE_OVERFLOW; ++small_id) {
		if (!overflow_busy [small_id])
			break;
	}

	/*
	 * If this assert fails we don't have enough overflow slots.
	 * We should contemplate adding them dynamically.  If we can
	 * make mono_thread_small_id_alloc() lock-free we can just
	 * allocate them on-demand.
	 */
	g_assert (small_id < HAZARD_TABLE_OVERFLOW);

	/* Claim the slot with a CAS; if another thread raced us, search again. */
	if (InterlockedCompareExchange (&overflow_busy [small_id], 1, 0) != 0)
		goto search;

	hp_overflow = &hazard_table [small_id];

	for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
		g_assert (!hp_overflow->hazard_pointers [i]);
	/* Copy first, then clear: the pointers must never be invisible to
	   is_pointer_hazardous () in between. */
	*hp_overflow = *hp;

	mono_memory_write_barrier ();

	memset (hp, 0, sizeof (MonoThreadHazardPointers));

	return small_id;
}
 
/*
 * Undo mono_hazard_pointer_save_for_signal_handler (): copy the saved hazard
 * pointers back into the calling thread's own record and release the
 * overflow slot.
 *
 * @small_id: the value returned by the save call; -1 means nothing was
 * saved and this is a no-op.
 */
void
mono_hazard_pointer_restore_for_signal_handler (int small_id)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MonoThreadHazardPointers *hp_overflow;
	int i;

	if (small_id < 0)
		return;

	g_assert (small_id < HAZARD_TABLE_OVERFLOW);
	g_assert (overflow_busy [small_id]);

	/* The signal handler must have cleared the thread's own slots already. */
	for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
		g_assert (!hp->hazard_pointers [i]);

	hp_overflow = &hazard_table [small_id];

	/* Copy back first, then clear the overflow record — same never-invisible
	   ordering as in the save path. */
	*hp = *hp_overflow;

	mono_memory_write_barrier ();

	memset (hp_overflow, 0, sizeof (MonoThreadHazardPointers));

	mono_memory_write_barrier ();

	/* Release the slot for reuse by other signal handlers. */
	overflow_busy [small_id] = 0;
}
+
+static gboolean
+try_free_delayed_free_item (HazardFreeContext context)
+{
+       DelayedFreeItem item;
+       gboolean popped = mono_lock_free_array_queue_pop (&delayed_free_queue, &item);
+
+       if (!popped)
+               return FALSE;
 
-       /* Now see if the pointer we're freeing is hazardous.  If it
-          isn't, free it.  Otherwise put it in the delay list. */
-       if (is_pointer_hazardous (p)) {
-               DelayedFreeItem item = { p, free_func };
+       if ((context == HAZARD_FREE_ASYNC_CTX && item.locking == HAZARD_FREE_MAY_LOCK) ||
+           (is_pointer_hazardous (item.p))) {
+               mono_lock_free_array_queue_push (&delayed_free_queue, &item);
+               return FALSE;
+       }
 
-               ++mono_stats.hazardous_pointer_count;
+       item.free_func (item.p);
 
-               EnterCriticalSection (&delayed_free_table_mutex);
-               g_array_append_val (delayed_free_table, item);
-               LeaveCriticalSection (&delayed_free_table_mutex);
-       } else
+       return TRUE;
+}
+
+/**
+ * mono_thread_hazardous_try_free:
+ * @p: the pointer to free
+ * @free_func: the function that can free the pointer
+ *
+ * If @p is not a hazardous pointer it will be immediately freed by calling @free_func.
+ * Otherwise it will be queued for later.
+ *
+ * Use this function if @free_func can ALWAYS be called in the context where this function is being called.
+ *
+ * This function doesn't pump the free queue so try to accommodate a call at an appropriate time.
+ * See mono_thread_hazardous_try_free_some for when it's appropriate.
+ *
+ * Return: TRUE if @p was free or FALSE if it was queued.
+ */
+gboolean
+mono_thread_hazardous_try_free (gpointer p, MonoHazardousFreeFunc free_func)
+{
+       if (!is_pointer_hazardous (p)) {
                free_func (p);
+               return TRUE;
+       } else {
+               mono_thread_hazardous_queue_free (p, free_func);
+               return FALSE;
+       }
 }
 
+/**
+ * mono_thread_hazardous_queue_free:
+ * @p: the pointer to free
+ * @free_func: the function that can free the pointer
+ *
+ * Queue @p to be freed later. @p will be freed once the hazard free queue is pumped.
+ *
+ * This function doesn't pump the free queue so try to accommodate a call at an appropriate time.
+ * See mono_thread_hazardous_try_free_some for when it's appropriate.
+ *
+ */
 void
-mono_thread_hazardous_try_free_all (void)
+mono_thread_hazardous_queue_free (gpointer p, MonoHazardousFreeFunc free_func)
 {
-       int len;
-       int i;
+       DelayedFreeItem item = { p, free_func, HAZARD_FREE_MAY_LOCK };
 
-       if (!delayed_free_table)
-               return;
+       InterlockedIncrement (&hazardous_pointer_count);
 
-       len = delayed_free_table->len;
+       mono_lock_free_array_queue_push (&delayed_free_queue, &item);
 
-       for (i = len - 1; i >= 0; --i)
-               try_free_delayed_free_item (i);
+       guint32 queue_size = delayed_free_queue.num_used_entries;
+       if (queue_size && queue_size_cb)
+               queue_size_cb (queue_size);
+}
+
+
/*
 * Install a callback that mono_thread_hazardous_queue_free () invokes with
 * the current delayed-free queue size, so the embedder can decide when to
 * pump the queue.
 */
void
mono_hazard_pointer_install_free_queue_size_callback (MonoHazardFreeQueueSizeCallback cb)
{
	queue_size_cb = cb;
}
+
+void
+mono_thread_hazardous_try_free_all (void)
+{
+       while (try_free_delayed_free_item (HAZARD_FREE_SAFE_CTX))
+               ;
+}
+
+void
+mono_thread_hazardous_try_free_some (void)
+{
+       int i;
+       for (i = 0; i < 10; ++i)
+               try_free_delayed_free_item (HAZARD_FREE_SAFE_CTX);
 }
 
 void
 mono_thread_smr_init (void)
 {
-       InitializeCriticalSection(&delayed_free_table_mutex);
-       InitializeCriticalSection(&small_id_mutex);     
+       int i;
+
+       mono_os_mutex_init_recursive(&small_id_mutex);
+       mono_counters_register ("Hazardous pointers", MONO_COUNTER_JIT | MONO_COUNTER_INT, &hazardous_pointer_count);
 
-       delayed_free_table = g_array_new (FALSE, FALSE, sizeof (DelayedFreeItem));
+       for (i = 0; i < HAZARD_TABLE_OVERFLOW; ++i) {
+               int small_id = mono_thread_small_id_alloc ();
+               g_assert (small_id == i);
+       }
 }
 
/*
 * Tear down the safe-memory-reclamation machinery: drain whatever can still
 * be freed, then release the delayed-free queue's storage.
 */
void
mono_thread_smr_cleanup (void)
{
	mono_thread_hazardous_try_free_all ();

	mono_lock_free_array_queue_cleanup (&delayed_free_queue);

	/*FIXME, can't we release the small id table here?*/
}