#include <mono/utils/mono-threads.h>
#include <mono/utils/atomic.h>
#include <mono/utils/mono-coop-semaphore.h>
+#include <mono/utils/hazard-pointer.h>
#ifndef HOST_WIN32
#include <pthread.h>
mono_coop_sem_post (&finalizer_sem);
}
+/*
+This is the number of entries allowed in the hazard free queue before
+we explicitly cycle the finalizer thread to trigger pumping the queue.
+
+The limit was picked empirically by running the corlib test suite in a
+stress scenario where all hazard entries are queued.
+
+In this extreme scenario we double the number of times we cycle the finalizer
+thread compared to just GC calls.
+
+Entries are usually on the order of hundreds of bytes each, so this limits
+floating garbage to roughly a dozen KB.
+*/
+static gboolean finalizer_thread_pulsed;
+#define HAZARD_QUEUE_OVERFLOW_SIZE 20
+
+/*
+ * Invoked (via the installed queue-size callback) with the current depth of
+ * the hazard free queue after every enqueue. Once the queue reaches
+ * HAZARD_QUEUE_OVERFLOW_SIZE entries, wake the finalizer thread so it drains
+ * the queue through hazard_free_queue_pump. The notify is issued at most once
+ * per pump cycle: the flag is re-armed only when the pump runs.
+ */
+static void
+hazard_free_queue_is_too_big (size_t size)
+{
+ if (size < HAZARD_QUEUE_OVERFLOW_SIZE)
+ return;
+
+ /* Cheap racy read first; the CAS returns the previous value, so a
+  * non-zero result means another thread already pulsed the finalizer
+  * and we must not notify again until the pump resets the flag. */
+ if (finalizer_thread_pulsed || InterlockedCompareExchange (&finalizer_thread_pulsed, TRUE, FALSE))
+ return;
+
+ mono_gc_finalize_notify ();
+}
+
+/*
+ * Drain the hazard free queue, then re-arm finalizer_thread_pulsed so the
+ * next overflow can wake the finalizer thread again. Called from the
+ * finalizer thread's main loop, after reference queues are processed.
+ */
+static void
+hazard_free_queue_pump (void)
+{
+ mono_thread_hazardous_try_free_all ();
+ finalizer_thread_pulsed = FALSE;
+}
+
#ifdef HAVE_BOEHM_GC
static void
{
gboolean wait = TRUE;
+ /* Register a hazard free queue pump callback */
+ mono_hazard_pointer_install_free_queue_size_callback (hazard_free_queue_is_too_big);
+
while (!finished) {
/* Wait to be notified that there's at least one
* finaliser to run
reference_queue_proccess_all ();
+ hazard_free_queue_pump ();
+
/* Avoid posting the pending done event until there are pending finalizers */
if (mono_coop_sem_timedwait (&finalizer_sem, 0, MONO_SEM_FLAGS_NONE) == 0) {
/* Don't wait again at the start of the loop */
*table_ptr = new_table;
mono_memory_barrier ();
domain->num_jit_info_tables++;
- mono_thread_hazardous_free_or_queue (table, (MonoHazardousFreeFunc)mono_jit_info_table_free, HAZARD_FREE_MAY_LOCK, HAZARD_FREE_SAFE_CTX);
+ mono_thread_hazardous_try_free (table, (MonoHazardousFreeFunc)mono_jit_info_table_free);
table = new_table;
goto restart;
if (domain->num_jit_info_tables <= 1) {
/* Can it actually happen that we only have one table
but ji is still hazardous? */
- mono_thread_hazardous_free_or_queue (ji, g_free, HAZARD_FREE_MAY_LOCK, HAZARD_FREE_SAFE_CTX);
+ mono_thread_hazardous_try_free (ji, g_free);
} else {
domain->jit_info_free_queue = g_slist_prepend (domain->jit_info_free_queue, ji);
}
* ######################################################################
*/
MonoCoopMutex gc_mutex;
-gboolean sgen_try_free_some_memory;
#define SCAN_START_SIZE SGEN_SCAN_START_SIZE
void
sgen_gc_unlock (void)
{
- gboolean try_free = sgen_try_free_some_memory;
- sgen_try_free_some_memory = FALSE;
+ /* Draining the hazard free queue is now driven by the finalizer thread
+  * (see hazard_free_queue_is_too_big / hazard_free_queue_pump), so
+  * unlocking the GC no longer pumps the queue itself. */
mono_coop_mutex_unlock (&gc_mutex);
- if (try_free)
- mono_thread_hazardous_try_free_some ();
}
void
binary_protocol_world_restarted (generation, sgen_timestamp ());
- sgen_try_free_some_memory = TRUE;
-
if (sgen_client_bridge_need_processing ())
sgen_client_bridge_processing_finish (generation);
static volatile int hazard_table_size = 0;
static MonoThreadHazardPointers * volatile hazard_table = NULL;
+static MonoHazardFreeQueueSizeCallback queue_size_cb;
/*
* Each entry is either 0 or 1, indicating whether that overflow small
return TRUE;
}
+/**
+ * mono_thread_hazardous_try_free:
+ * @p: the pointer to free
+ * @free_func: the function that can free the pointer
+ *
+ * If @p is not a hazardous pointer it will be immediately freed by calling @free_func.
+ * Otherwise it will be queued for later.
+ *
+ * Use this function if @free_func can ALWAYS be called in the context where this function is being called.
+ *
+ * This function doesn't pump the free queue, so try to accommodate a call at an appropriate time.
+ * See mono_thread_hazardous_try_free_some for when it's appropriate.
+ *
+ * Returns: TRUE if @p was freed or FALSE if it was queued.
+ */
+gboolean
+mono_thread_hazardous_try_free (gpointer p, MonoHazardousFreeFunc free_func)
+{
+ if (!is_pointer_hazardous (p)) {
+ free_func (p);
+ return TRUE;
+ } else {
+ mono_thread_hazardous_queue_free (p, free_func);
+ return FALSE;
+ }
+}
+
+/**
+ * mono_thread_hazardous_queue_free:
+ * @p: the pointer to free
+ * @free_func: the function that can free the pointer
+ *
+ * Queue @p to be freed later. @p will be freed once the hazard free queue is pumped.
+ *
+ * This function doesn't pump the free queue so try to accommodate a call at an appropriate time.
+ * See mono_thread_hazardous_try_free_some for when it's appropriate.
+ *
+ */
void
-mono_thread_hazardous_free_or_queue (gpointer p, MonoHazardousFreeFunc free_func,
- HazardFreeLocking locking, HazardFreeContext context)
+mono_thread_hazardous_queue_free (gpointer p, MonoHazardousFreeFunc free_func)
{
- int i;
+ DelayedFreeItem item = { p, free_func, HAZARD_FREE_MAY_LOCK };
- /* First try to free a few entries in the delayed free
- table. */
- for (i = 0; i < 3; ++i)
- try_free_delayed_free_item (context);
+ /* Atomic now: callers may enqueue concurrently from any thread. */
+ InterlockedIncrement (&hazardous_pointer_count);
- /* Now see if the pointer we're freeing is hazardous. If it
- isn't, free it. Otherwise put it in the delay list. */
- if ((context == HAZARD_FREE_ASYNC_CTX && locking == HAZARD_FREE_MAY_LOCK) ||
- is_pointer_hazardous (p)) {
- DelayedFreeItem item = { p, free_func, locking };
+ mono_lock_free_array_queue_push (&delayed_free_queue, &item);
- ++hazardous_pointer_count;
+ /* Report the (racy, best-effort) queue depth so an overflow callback can
+  * trigger a pump. NOTE(review): num_used_entries is read unsynchronized —
+  * presumably an approximate value is acceptable here; confirm. */
+ guint32 queue_size = delayed_free_queue.num_used_entries;
+ if (queue_size && queue_size_cb)
+ queue_size_cb (queue_size);
+}
- mono_lock_free_array_queue_push (&delayed_free_queue, &item);
- } else {
- free_func (p);
- }
+
+/**
+ * mono_hazard_pointer_install_free_queue_size_callback:
+ * @cb: callback invoked with the queue depth after each enqueue
+ *
+ * Install the single, global callback that mono_thread_hazardous_queue_free
+ * reports the free-queue size to. The store is not synchronized, so install
+ * it once during startup (e.g. from the finalizer thread's init).
+ */
+void
+mono_hazard_pointer_install_free_queue_size_callback (MonoHazardFreeQueueSizeCallback cb)
+{
+ queue_size_cb = cb;
}
void
HAZARD_FREE_ASYNC_CTX,
} HazardFreeContext;
-void mono_thread_hazardous_free_or_queue (gpointer p, MonoHazardousFreeFunc free_func,
- HazardFreeLocking locking, HazardFreeContext context);
+gboolean mono_thread_hazardous_try_free (gpointer p, MonoHazardousFreeFunc free_func);
+void mono_thread_hazardous_queue_free (gpointer p, MonoHazardousFreeFunc free_func);
+
void mono_thread_hazardous_try_free_all (void);
void mono_thread_hazardous_try_free_some (void);
MonoThreadHazardPointers* mono_hazard_pointer_get (void);
int mono_hazard_pointer_save_for_signal_handler (void);
void mono_hazard_pointer_restore_for_signal_handler (int small_id);
+typedef void (*MonoHazardFreeQueueSizeCallback)(size_t size);
+void mono_hazard_pointer_install_free_queue_size_callback (MonoHazardFreeQueueSizeCallback cb);
+
void mono_thread_smr_init (void);
void mono_thread_smr_cleanup (void);
#endif /*__MONO_HAZARD_POINTER_H__*/
g_assert (desc->in_use);
desc->in_use = FALSE;
free_sb (desc->sb, desc->block_size);
- mono_thread_hazardous_free_or_queue (desc, desc_enqueue_avail, HAZARD_FREE_NO_LOCK, HAZARD_FREE_ASYNC_CTX);
+ mono_thread_hazardous_try_free (desc, desc_enqueue_avail);
}
#else
MonoLockFreeQueue available_descs;
list_put_partial (Descriptor *desc)
{
g_assert (desc->anchor.data.state != STATE_FULL);
- mono_thread_hazardous_free_or_queue (desc, desc_put_partial, HAZARD_FREE_NO_LOCK, HAZARD_FREE_ASYNC_CTX);
+ mono_thread_hazardous_try_free (desc, desc_put_partial);
}
static void
desc_retire (desc);
} else {
g_assert (desc->heap->sc == sc);
- mono_thread_hazardous_free_or_queue (desc, desc_put_partial, HAZARD_FREE_NO_LOCK, HAZARD_FREE_ASYNC_CTX);
+ mono_thread_hazardous_try_free (desc, desc_put_partial);
if (++num_non_empty >= 2)
return;
}
g_assert (q->has_dummy);
q->has_dummy = 0;
mono_memory_write_barrier ();
- mono_thread_hazardous_free_or_queue (head, free_dummy, HAZARD_FREE_NO_LOCK, HAZARD_FREE_ASYNC_CTX);
+ mono_thread_hazardous_try_free (head, free_dummy);
if (try_reenqueue_dummy (q))
goto retry;
return NULL;
static void
conc_table_lf_free (conc_table *table)
{
- mono_thread_hazardous_free_or_queue (table, conc_table_free, HAZARD_FREE_MAY_LOCK, HAZARD_FREE_SAFE_CTX);
+ mono_thread_hazardous_try_free (table, conc_table_free);
}
mono_memory_write_barrier ();
mono_hazard_pointer_clear (hp, 1);
if (list->free_node_func)
- mono_thread_hazardous_free_or_queue (cur, list->free_node_func, list->locking, context);
+ mono_thread_hazardous_queue_free (cur, list->free_node_func);
} else
goto try_again;
}
mono_memory_write_barrier ();
mono_hazard_pointer_clear (hp, 1);
if (list->free_node_func)
- mono_thread_hazardous_free_or_queue (value, list->free_node_func, list->locking, context);
+ mono_thread_hazardous_try_free (value, list->free_node_func);
} else
mono_lls_find (list, hp, value->key, context);
return TRUE;
mono_memory_write_barrier (); \
mono_hazard_pointer_clear (hp__, 1); \
if (list__->free_node_func) { \
- mono_thread_hazardous_free_or_queue (cur__, list__->free_node_func, list__->locking, HAZARD_FREE_ASYNC_CTX); \
+ mono_thread_hazardous_queue_free (cur__, list__->free_node_func); \
} \
} else { \
restart__ = TRUE; \
g_byte_array_free (info->stackdata, /*free_segment=*/TRUE);
/*now it's safe to free the thread info.*/
- mono_thread_hazardous_free_or_queue (info, free_thread_info, HAZARD_FREE_MAY_LOCK, HAZARD_FREE_SAFE_CTX);
+ mono_thread_hazardous_try_free (info, free_thread_info);
+ /* Pump the HP queue */
+ mono_thread_hazardous_try_free_some ();
+
mono_thread_small_id_free (small_id);
}