2 * hazard-pointer.c: Hazard pointer related code.
4 * (C) Copyright 2011 Novell, Inc
5 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
12 #include <mono/utils/hazard-pointer.h>
13 #include <mono/utils/mono-membar.h>
14 #include <mono/utils/mono-memory-model.h>
15 #include <mono/utils/monobitset.h>
16 #include <mono/utils/lock-free-array-queue.h>
17 #include <mono/utils/atomic.h>
18 #include <mono/utils/mono-os-mutex.h>
19 #ifdef SGEN_WITHOUT_MONO
20 #include <mono/sgen/sgen-gc.h>
21 #include <mono/sgen/sgen-client.h>
23 #include <mono/utils/mono-mmap.h>
24 #include <mono/utils/mono-threads.h>
25 #include <mono/utils/mono-counters.h>
/* NOTE(review): member of the DelayedFreeItem struct pushed onto
 * delayed_free_queue below; the struct's opening line and its pointer
 * member are not visible in this chunk. */
30 MonoHazardousFreeFunc free_func;
33 /* The hazard table */
/* NOTE(review): two alternative definitions of the table limits follow;
 * the #if MONO_SMALL_CONFIG / #else / #endif lines selecting between
 * them are not visible in this chunk. */
35 #define HAZARD_TABLE_MAX_SIZE 256
36 #define HAZARD_TABLE_OVERFLOW 4
38 #define HAZARD_TABLE_MAX_SIZE 16384 /* There cannot be more threads than this number. */
39 #define HAZARD_TABLE_OVERFLOW 64
/* Number of usable entries in hazard_table; grown on demand in
 * mono_thread_small_id_alloc (). */
42 static volatile int hazard_table_size = 0;
/* One MonoThreadHazardPointers slot per small thread id. */
43 static MonoThreadHazardPointers * volatile hazard_table = NULL;
/* Optional callback notified with the delayed-free queue size after each
 * push (see mono_thread_hazardous_queue_free ()). */
44 static MonoHazardFreeQueueSizeCallback queue_size_cb;
47 * Each entry is either 0 or 1, indicating whether that overflow small
 * id is currently claimed by a signal handler (see the
 * *_for_signal_handler functions below).
50 static volatile gint32 overflow_busy [HAZARD_TABLE_OVERFLOW];
52 /* The table where we keep pointers to blocks to be freed but that
53 have to wait because they're guarded by a hazard pointer. */
54 static MonoLockFreeArrayQueue delayed_free_queue = MONO_LOCK_FREE_ARRAY_QUEUE_INIT (sizeof (DelayedFreeItem), MONO_MEM_ACCOUNT_HAZARD_POINTERS);
56 /* The table for small ID assignment */
/* Serializes all small-id allocation/free; MonoBitSet ops are not atomic. */
57 static mono_mutex_t small_id_mutex;
/* Hint: where the next id search starts. */
58 static int small_id_next;
/* Highest id ever handed out; bounds the scan in is_pointer_hazardous (). */
59 static int highest_small_id = -1;
60 static MonoBitSet *small_id_table;
/* Exported via mono_counters_register () in mono_thread_smr_init (). */
61 static int hazardous_pointer_count;
64 * Allocate a small thread id.
66 * FIXME: The biggest part of this function is very similar to
67 * domain_id_alloc() in domain.c and should be merged.
/*
 * mono_thread_small_id_alloc:
 *
 * Allocate a small, dense thread id and make sure hazard_table has an
 * addressable, zeroed entry for it.  Serialized by small_id_mutex.
 * NOTE(review): several interior lines of this function (local
 * declarations, some branch headers, closing braces and the return
 * statement) are not visible in this chunk.
 */
70 mono_thread_small_id_alloc (void)
74 mono_os_mutex_lock (&small_id_mutex);
/* Lazily create the id bitset on first use. */
77 small_id_table = mono_bitset_new (1, 0);
/* First search from the small_id_next hint, then retry from the start. */
79 id = mono_bitset_find_first_unset (small_id_table, small_id_next - 1);
81 id = mono_bitset_find_first_unset (small_id_table, -1);
/* No free bit at all: double the bitset (ids are capped below 1 << 16). */
84 MonoBitSet *new_table;
85 if (small_id_table->size * 2 >= (1 << 16))
86 g_assert_not_reached ();
87 new_table = mono_bitset_clone (small_id_table, small_id_table->size * 2);
88 id = mono_bitset_find_first_unset (new_table, small_id_table->size - 1);
90 mono_bitset_free (small_id_table);
91 small_id_table = new_table;
/* Claim the id: it must currently be unset. */
94 g_assert (!mono_bitset_test_fast (small_id_table, id));
95 mono_bitset_set_fast (small_id_table, id);
98 if (small_id_next >= small_id_table->size)
/* Grow the hazard table so hazard_table [id] becomes addressable. */
101 g_assert (id < HAZARD_TABLE_MAX_SIZE);
102 if (id >= hazard_table_size) {
103 #if MONO_SMALL_CONFIG
/* Small config: allocate the entire (small) table in one go. */
104 hazard_table = g_malloc0 (sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE);
105 hazard_table_size = HAZARD_TABLE_MAX_SIZE;
/* Full config: reserve the maximum-size mapping once, then commit
 * pages on demand with mprotect as ids grow. */
108 int pagesize = mono_pagesize ();
109 int num_pages = (hazard_table_size * sizeof (MonoThreadHazardPointers) + pagesize - 1) / pagesize;
111 if (hazard_table == NULL) {
112 hazard_table = (MonoThreadHazardPointers *volatile) mono_valloc (NULL,
113 sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE,
114 MONO_MMAP_NONE, MONO_MEM_ACCOUNT_HAZARD_POINTERS);
117 g_assert (hazard_table != NULL);
/* Make one more page past the committed region read/write. */
118 page_addr = (guint8*)hazard_table + num_pages * pagesize;
120 mono_mprotect (page_addr, pagesize, MONO_MMAP_READ | MONO_MMAP_WRITE);
/* Recompute the usable entry count from the committed pages. */
123 hazard_table_size = num_pages * pagesize / sizeof (MonoThreadHazardPointers);
126 g_assert (id < hazard_table_size);
/* Fresh slot: clear every hazard pointer for this id. */
127 for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
128 hazard_table [id].hazard_pointers [i] = NULL;
/* Publish the new scan bound before the id can be observed in use. */
131 if (id > highest_small_id) {
132 highest_small_id = id;
133 mono_memory_write_barrier ();
136 mono_os_mutex_unlock (&small_id_mutex);
/*
 * mono_thread_small_id_free:
 *
 * Return ID, previously obtained from mono_thread_small_id_alloc (), to
 * the free pool.  NOTE(review): the return-type line and the function's
 * braces are not visible in this chunk.
 */
142 mono_thread_small_id_free (int id)
144 /* MonoBitSet operations are not atomic. */
145 mono_os_mutex_lock (&small_id_mutex);
/* The id must be in range and currently allocated. */
147 g_assert (id >= 0 && id < small_id_table->size);
148 g_assert (mono_bitset_test_fast (small_id_table, id));
149 mono_bitset_clear_fast (small_id_table, id);
151 mono_os_mutex_unlock (&small_id_mutex);
/*
 * is_pointer_hazardous:
 *
 * Scan every allocated small id's hazard pointer slots for P.
 * highest_small_id is read once into a local so the scan bound is
 * stable.  NOTE(review): the return-type line and the return
 * statements are not visible in this chunk; presumably it returns
 * whether P was found in any slot.
 */
155 is_pointer_hazardous (gpointer p)
158 int highest = highest_small_id;
160 g_assert (highest < hazard_table_size);
/* Check all HAZARD_POINTER_COUNT slots of every id up to the bound. */
162 for (i = 0; i <= highest; ++i) {
163 for (j = 0; j < HAZARD_POINTER_COUNT; ++j) {
164 if (hazard_table [i].hazard_pointers [j] == p)
/*
 * mono_hazard_pointer_get:
 *
 * Return the calling thread's hazard pointer record.  NOTE(review): the
 * branch header guarding the fallback path below is not visible in this
 * chunk; presumably it tests for an invalid small id.
 */
173 MonoThreadHazardPointers*
174 mono_hazard_pointer_get (void)
176 int small_id = mono_thread_info_get_small_id ();
/* Fallback: thread has no small id (e.g. prematurely finalized), hand
 * out a shared emergency record so the caller still gets storage. */
179 static MonoThreadHazardPointers emerg_hazard_table;
180 g_warning ("Thread %p may have been prematurely finalized", (gpointer) (gsize) mono_native_thread_id_get ());
181 return &emerg_hazard_table;
184 return &hazard_table [small_id];
187 /* Can be called with hp==NULL, in which case it acts as an ordinary
188 pointer fetch. It's used that way indirectly from
189 mono_jit_info_table_add(), which doesn't have to care about hazards
190 because it holds the respective domain lock. */
/* NOTE(review): the return-type line, the load of *pp, the hp==NULL
 * branch headers, the retry loop and the return statement of this
 * function are not visible in this chunk. */
192 mono_get_hazardous_pointer (gpointer volatile *pp, MonoThreadHazardPointers *hp, int hazard_index)
197 /* Get the pointer */
199 /* If we don't have hazard pointers just return the
203 /* Make it hazardous */
204 mono_hazard_pointer_set (hp, hazard_index, p);
205 /* Check that it's still the same. If not, try
208 mono_hazard_pointer_clear (hp, hazard_index);
/*
 * mono_hazard_pointer_save_for_signal_handler:
 *
 * Move the calling thread's live hazard pointers into a claimed
 * overflow slot so the regular record can be reused (e.g. inside a
 * signal handler); undone by
 * mono_hazard_pointer_restore_for_signal_handler ().  NOTE(review): the
 * return-type line, the early-out when no pointer is set, the actual
 * copy into the overflow record, and the return of the claimed small id
 * are not visible in this chunk.
 */
218 mono_hazard_pointer_save_for_signal_handler (void)
221 MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
222 MonoThreadHazardPointers *hp_overflow;
/* Detect whether any hazard pointer is currently set. */
224 for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
225 if (hp->hazard_pointers [i])
/* Find a free overflow slot. */
230 for (small_id = 0; small_id < HAZARD_TABLE_OVERFLOW; ++small_id) {
231 if (!overflow_busy [small_id])
236 * If this assert fails we don't have enough overflow slots.
237 * We should contemplate adding them dynamically. If we can
238 * make mono_thread_small_id_alloc() lock-free we can just
239 * allocate them on-demand.
241 g_assert (small_id < HAZARD_TABLE_OVERFLOW);
/* Claim the slot atomically; on contention, retry the search. */
243 if (InterlockedCompareExchange (&overflow_busy [small_id], 1, 0) != 0)
246 hp_overflow = &hazard_table [small_id];
/* A freshly claimed overflow slot must be empty. */
248 for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
249 g_assert (!hp_overflow->hazard_pointers [i]);
/* Make the copied pointers visible before clearing the originals. */
252 mono_memory_write_barrier ();
254 memset (hp, 0, sizeof (MonoThreadHazardPointers));
/*
 * mono_hazard_pointer_restore_for_signal_handler:
 *
 * Undo mono_hazard_pointer_save_for_signal_handler (): copy the
 * pointers back from overflow slot SMALL_ID into the thread's own
 * record and release the slot.  NOTE(review): the return-type line, the
 * early-out branch, the copy back into *hp, and closing braces are not
 * visible in this chunk.
 */
260 mono_hazard_pointer_restore_for_signal_handler (int small_id)
262 MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
263 MonoThreadHazardPointers *hp_overflow;
/* The slot must be a valid, currently claimed overflow slot. */
269 g_assert (small_id < HAZARD_TABLE_OVERFLOW);
270 g_assert (overflow_busy [small_id]);
/* The thread's own record must be empty before restoring into it. */
272 for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
273 g_assert (!hp->hazard_pointers [i]);
275 hp_overflow = &hazard_table [small_id];
/* Publish the restored pointers before emptying the overflow slot. */
279 mono_memory_write_barrier ();
281 memset (hp_overflow, 0, sizeof (MonoThreadHazardPointers));
/* Make the cleared slot visible before marking it free. */
283 mono_memory_write_barrier ();
285 overflow_busy [small_id] = 0;
289 * mono_thread_hazardous_try_free:
290 * @p: the pointer to free
291 * @free_func: the function that can free the pointer
293 * If @p is not a hazardous pointer it will be immediately freed by calling @free_func.
294 * Otherwise it will be queued for later.
296 * Use this function if @free_func can ALWAYS be called in the context where this function is being called.
298 * This function doesn't pump the free queue so try to accommodate a call at an appropriate time.
299 * See mono_thread_hazardous_try_free_some for when it's appropriate.
301 * Return: TRUE if @p was freed or FALSE if it was queued.
/* NOTE(review): the return-type line, the immediate free_func (p) call,
 * the return statements and braces are not visible in this chunk. */
304 mono_thread_hazardous_try_free (gpointer p, MonoHazardousFreeFunc free_func)
/* Fast path: no thread currently guards p. */
306 if (!is_pointer_hazardous (p)) {
/* Slow path: defer to the delayed-free queue. */
310 mono_thread_hazardous_queue_free (p, free_func);
316 * mono_thread_hazardous_queue_free:
317 * @p: the pointer to free
318 * @free_func: the function that can free the pointer
320 * Queue @p to be freed later. @p will be freed once the hazard free queue is pumped.
322 * This function doesn't pump the free queue so try to accommodate a call at an appropriate time.
323 * See mono_thread_hazardous_try_free_some for when it's appropriate.
/* NOTE(review): the return-type line and function braces are not
 * visible in this chunk. */
327 mono_thread_hazardous_queue_free (gpointer p, MonoHazardousFreeFunc free_func)
329 DelayedFreeItem item = { p, free_func };
/* Account for the pending item (counter shown by mono_counters). */
331 InterlockedIncrement (&hazardous_pointer_count);
333 mono_lock_free_array_queue_push (&delayed_free_queue, &item);
/* Report the (approximate) queue size to the installed callback, if any. */
335 guint32 queue_size = delayed_free_queue.num_used_entries;
336 if (queue_size && queue_size_cb)
337 queue_size_cb (queue_size);
/*
 * mono_hazard_pointer_install_free_queue_size_callback:
 *
 * Install CB, invoked with the delayed-free queue size after each push
 * (see mono_thread_hazardous_queue_free ()).  NOTE(review): the
 * return-type line and the body (presumably `queue_size_cb = cb;`) are
 * not visible in this chunk.
 */
342 mono_hazard_pointer_install_free_queue_size_callback (MonoHazardFreeQueueSizeCallback cb)
/*
 * try_free_delayed_free_items:
 *
 * Pump the delayed-free queue: free every popped item that is no longer
 * hazardous, collect the still-hazardous ones, and push those back at
 * the end.  NOTE(review): the return-type line, the declaration and
 * increment of `freed`, the decrement of hazardous_pointer_count,
 * several branch headers/braces and the return statement are not
 * visible in this chunk.
 */
348 try_free_delayed_free_items (guint32 limit)
350 GArray *hazardous = NULL;
351 DelayedFreeItem item;
354 // Free all the items we can and re-add the ones we can't to the queue.
355 while (mono_lock_free_array_queue_pop (&delayed_free_queue, &item)) {
356 if (is_pointer_hazardous (item.p)) {
/* Lazily create the holding array, sized to the current queue. */
358 hazardous = g_array_sized_new (FALSE, FALSE, sizeof (DelayedFreeItem), delayed_free_queue.num_used_entries);
360 g_array_append_val (hazardous, item);
/* Not hazardous anymore: actually release it. */
364 item.free_func (item.p);
/* limit == 0 means pump until the queue is exhausted. */
367 if (limit && freed == limit)
/* Re-queue the items that were still guarded, then drop the array. */
372 for (gint i = 0; i < hazardous->len; i++)
373 mono_lock_free_array_queue_push (&delayed_free_queue, &g_array_index (hazardous, DelayedFreeItem, i));
375 g_array_free (hazardous, TRUE);
/* Pump the entire delayed-free queue (limit 0 = unbounded).
 * NOTE(review): the return-type line and braces are not visible here. */
380 mono_thread_hazardous_try_free_all (void)
382 try_free_delayed_free_items (0);
/* Pump at most 10 items off the delayed-free queue — the bounded
 * variant suitable for calling from latency-sensitive paths.
 * NOTE(review): the return-type line and braces are not visible here. */
386 mono_thread_hazardous_try_free_some (void)
388 try_free_delayed_free_items (10);
/*
 * mono_thread_smr_init:
 *
 * One-time initialization of the safe-memory-reclamation machinery:
 * sets up the small-id lock, registers the hazardous-pointer counter,
 * and pre-allocates the signal-handler overflow slots.  NOTE(review):
 * the return-type line, local declarations and braces are not visible
 * in this chunk.
 */
392 mono_thread_smr_init (void)
396 mono_os_mutex_init_recursive(&small_id_mutex);
397 mono_counters_register ("Hazardous pointers", MONO_COUNTER_JIT | MONO_COUNTER_INT, &hazardous_pointer_count);
/* Reserve small ids 0..HAZARD_TABLE_OVERFLOW-1 for the overflow slots
 * used by mono_hazard_pointer_save_for_signal_handler (). */
399 for (i = 0; i < HAZARD_TABLE_OVERFLOW; ++i) {
400 int small_id = mono_thread_small_id_alloc ();
401 g_assert (small_id == i);
/*
 * mono_thread_smr_cleanup:
 *
 * Shutdown counterpart of mono_thread_smr_init (): drain whatever can
 * still be freed, then tear down the delayed-free queue.  NOTE(review):
 * the return-type line and braces are not visible in this chunk.
 */
406 mono_thread_smr_cleanup (void)
408 mono_thread_hazardous_try_free_all ();
410 mono_lock_free_array_queue_cleanup (&delayed_free_queue);
412 /*FIXME, can't we release the small id table here?*/