/*
 * hazard-pointer.c: Hazard pointer related code.
 *
 * (C) Copyright 2011 Novell, Inc
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
12 #include <mono/utils/hazard-pointer.h>
13 #include <mono/utils/mono-membar.h>
14 #include <mono/utils/mono-memory-model.h>
15 #include <mono/utils/monobitset.h>
16 #include <mono/utils/lock-free-array-queue.h>
17 #include <mono/utils/atomic.h>
18 #include <mono/utils/mono-os-mutex.h>
19 #ifdef SGEN_WITHOUT_MONO
20 #include <mono/sgen/sgen-gc.h>
21 #include <mono/sgen/sgen-client.h>
23 #include <mono/utils/mono-mmap.h>
24 #include <mono/utils/mono-threads.h>
25 #include <mono/utils/mono-counters.h>
26 #include <mono/io-layer/io-layer.h>
/* NOTE(review): the two fields below belong to the DelayedFreeItem record
   queued for deferred freeing — the enclosing "typedef struct { gpointer p;"
   lines are not visible in this extract; confirm against the full file. */
MonoHazardousFreeFunc free_func;	/* callback used to free the queued pointer once unguarded */
HazardFreeLocking locking;		/* whether free_func may take locks */

/* The hazard table */
/* NOTE(review): the two #define pairs below redefine the same macros — in the
   full file they are separated by #if MONO_SMALL_CONFIG / #else / #endif
   lines that are elided from this extract. */
#define HAZARD_TABLE_MAX_SIZE 256
#define HAZARD_TABLE_OVERFLOW 4
#define HAZARD_TABLE_MAX_SIZE 16384 /* There cannot be more threads than this number. */
#define HAZARD_TABLE_OVERFLOW 64

static volatile int hazard_table_size = 0;
static MonoThreadHazardPointers * volatile hazard_table = NULL;

/* Overflow slots claimed from signal handlers (see
   mono_hazard_pointer_save_for_signal_handler).  Each entry is either 0 or 1,
   indicating whether that overflow small id is currently in use. */
static volatile gint32 overflow_busy [HAZARD_TABLE_OVERFLOW];

/* The table where we keep pointers to blocks to be freed but that
   have to wait because they're guarded by a hazard pointer. */
static MonoLockFreeArrayQueue delayed_free_queue = MONO_LOCK_FREE_ARRAY_QUEUE_INIT (sizeof (DelayedFreeItem));

/* The table for small ID assignment */
static mono_mutex_t small_id_mutex;	/* guards small_id_table/small_id_next; bitset ops are not atomic */
static int small_id_next;		/* rotating start point for the free-bit search */
static int highest_small_id = -1;	/* highest id handed out; upper bound for hazard scans */
static MonoBitSet *small_id_table;	/* bit i set => small id i is allocated */
static int hazardous_pointer_count;	/* counts frees that had to be queued (pointer was hazardous) */
/*
 * Allocate a small thread id.
 *
 * Hands out the lowest free bit of small_id_table under small_id_mutex,
 * growing the bitset (hard cap 1 << 16 ids) and the global hazard_table on
 * demand so that hazard_table [id] is valid before the id is published.
 *
 * FIXME: The biggest part of this function is very similar to
 * domain_id_alloc() in domain.c and should be merged.
 *
 * NOTE(review): this extract is elided — the return-type line, braces and
 * several statements (the "if (id == -1)" retry guards, "small_id_next++",
 * the #else/#endif of the MONO_SMALL_CONFIG conditional, the final
 * "return id;") are not visible here; confirm against the full file.
 */
mono_thread_small_id_alloc (void)
	mono_os_mutex_lock (&small_id_mutex);

	/* Lazily create the bitset on first use. */
	small_id_table = mono_bitset_new (1, 0);

	/* Look for a free bit starting at the rotating start point ... */
	id = mono_bitset_find_first_unset (small_id_table, small_id_next - 1);
	/* ... and, failing that, from the beginning of the bitset. */
	id = mono_bitset_find_first_unset (small_id_table, -1);

	/* No free bit at all: double the bitset, capped at 1 << 16 ids. */
	MonoBitSet *new_table;
	if (small_id_table->size * 2 >= (1 << 16))
		g_assert_not_reached ();
	new_table = mono_bitset_clone (small_id_table, small_id_table->size * 2);
	id = mono_bitset_find_first_unset (new_table, small_id_table->size - 1);

	mono_bitset_free (small_id_table);
	small_id_table = new_table;

	/* Claim the id in the bitset. */
	g_assert (!mono_bitset_test_fast (small_id_table, id));
	mono_bitset_set_fast (small_id_table, id);

	/* Wrap the rotating start point when it runs off the end. */
	if (small_id_next >= small_id_table->size)

	/* Grow the hazard table if this id does not fit yet. */
	g_assert (id < HAZARD_TABLE_MAX_SIZE);
	if (id >= hazard_table_size) {
#if MONO_SMALL_CONFIG
		/* Small config: allocate the whole (small) table eagerly. */
		hazard_table = g_malloc0 (sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE);
		hazard_table_size = HAZARD_TABLE_MAX_SIZE;
		/* Full config: the address range for the maximum-size table is
		   reserved once, then committed one page at a time below.
		   NOTE(review): page_addr's declaration, the closing of the
		   mono_valloc call and the #else/#endif are elided here. */
		int pagesize = mono_pagesize ();
		int num_pages = (hazard_table_size * sizeof (MonoThreadHazardPointers) + pagesize - 1) / pagesize;

		if (hazard_table == NULL) {
			hazard_table = (MonoThreadHazardPointers *volatile) mono_valloc (NULL,
			sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE,

		g_assert (hazard_table != NULL);
		page_addr = (guint8*)hazard_table + num_pages * pagesize;

		/* Commit the next page of the reserved region. */
		mono_mprotect (page_addr, pagesize, MONO_MMAP_READ | MONO_MMAP_WRITE);

		hazard_table_size = num_pages * pagesize / sizeof (MonoThreadHazardPointers);

	/* Clear the slot before publishing the id. */
	g_assert (id < hazard_table_size);
	for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
		hazard_table [id].hazard_pointers [i] = NULL;

	/* Publish the new scan bound; the write barrier orders the cleared
	   slot before the enlarged bound becomes visible to scanners. */
	if (id > highest_small_id) {
		highest_small_id = id;
		mono_memory_write_barrier ();

	mono_os_mutex_unlock (&small_id_mutex);
143 mono_thread_small_id_free (int id)
145 /* MonoBitSet operations are not atomic. */
146 mono_os_mutex_lock (&small_id_mutex);
148 g_assert (id >= 0 && id < small_id_table->size);
149 g_assert (mono_bitset_test_fast (small_id_table, id));
150 mono_bitset_clear_fast (small_id_table, id);
152 mono_os_mutex_unlock (&small_id_mutex);
156 is_pointer_hazardous (gpointer p)
159 int highest = highest_small_id;
161 g_assert (highest < hazard_table_size);
163 for (i = 0; i <= highest; ++i) {
164 for (j = 0; j < HAZARD_POINTER_COUNT; ++j) {
165 if (hazard_table [i].hazard_pointers [j] == p)
174 MonoThreadHazardPointers*
175 mono_hazard_pointer_get (void)
177 int small_id = mono_thread_info_get_small_id ();
180 static MonoThreadHazardPointers emerg_hazard_table;
181 g_warning ("Thread %p may have been prematurely finalized", (gpointer) (gsize) mono_native_thread_id_get ());
182 return &emerg_hazard_table;
185 return &hazard_table [small_id];
188 /* Can be called with hp==NULL, in which case it acts as an ordinary
189 pointer fetch. It's used that way indirectly from
190 mono_jit_info_table_add(), which doesn't have to care about hazards
191 because it holds the respective domain lock. */
193 get_hazardous_pointer (gpointer volatile *pp, MonoThreadHazardPointers *hp, int hazard_index)
198 /* Get the pointer */
200 /* If we don't have hazard pointers just return the
204 /* Make it hazardous */
205 mono_hazard_pointer_set (hp, hazard_index, p);
206 /* Check that it's still the same. If not, try
209 mono_hazard_pointer_clear (hp, hazard_index);
219 mono_hazard_pointer_save_for_signal_handler (void)
222 MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
223 MonoThreadHazardPointers *hp_overflow;
225 for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
226 if (hp->hazard_pointers [i])
231 for (small_id = 0; small_id < HAZARD_TABLE_OVERFLOW; ++small_id) {
232 if (!overflow_busy [small_id])
237 * If this assert fails we don't have enough overflow slots.
238 * We should contemplate adding them dynamically. If we can
239 * make mono_thread_small_id_alloc() lock-free we can just
240 * allocate them on-demand.
242 g_assert (small_id < HAZARD_TABLE_OVERFLOW);
244 if (InterlockedCompareExchange (&overflow_busy [small_id], 1, 0) != 0)
247 hp_overflow = &hazard_table [small_id];
249 for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
250 g_assert (!hp_overflow->hazard_pointers [i]);
253 mono_memory_write_barrier ();
255 memset (hp, 0, sizeof (MonoThreadHazardPointers));
261 mono_hazard_pointer_restore_for_signal_handler (int small_id)
263 MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
264 MonoThreadHazardPointers *hp_overflow;
270 g_assert (small_id < HAZARD_TABLE_OVERFLOW);
271 g_assert (overflow_busy [small_id]);
273 for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
274 g_assert (!hp->hazard_pointers [i]);
276 hp_overflow = &hazard_table [small_id];
280 mono_memory_write_barrier ();
282 memset (hp_overflow, 0, sizeof (MonoThreadHazardPointers));
284 mono_memory_write_barrier ();
286 overflow_busy [small_id] = 0;
290 try_free_delayed_free_item (HazardFreeContext context)
292 DelayedFreeItem item;
293 gboolean popped = mono_lock_free_array_queue_pop (&delayed_free_queue, &item);
298 if ((context == HAZARD_FREE_ASYNC_CTX && item.locking == HAZARD_FREE_MAY_LOCK) ||
299 (is_pointer_hazardous (item.p))) {
300 mono_lock_free_array_queue_push (&delayed_free_queue, &item);
304 item.free_func (item.p);
310 mono_thread_hazardous_free_or_queue (gpointer p, MonoHazardousFreeFunc free_func,
311 HazardFreeLocking locking, HazardFreeContext context)
315 /* First try to free a few entries in the delayed free
317 for (i = 0; i < 3; ++i)
318 try_free_delayed_free_item (context);
320 /* Now see if the pointer we're freeing is hazardous. If it
321 isn't, free it. Otherwise put it in the delay list. */
322 if ((context == HAZARD_FREE_ASYNC_CTX && locking == HAZARD_FREE_MAY_LOCK) ||
323 is_pointer_hazardous (p)) {
324 DelayedFreeItem item = { p, free_func, locking };
326 ++hazardous_pointer_count;
328 mono_lock_free_array_queue_push (&delayed_free_queue, &item);
335 mono_thread_hazardous_try_free_all (void)
337 while (try_free_delayed_free_item (HAZARD_FREE_SAFE_CTX))
342 mono_thread_hazardous_try_free_some (void)
345 for (i = 0; i < 10; ++i)
346 try_free_delayed_free_item (HAZARD_FREE_SAFE_CTX);
350 mono_thread_smr_init (void)
354 mono_os_mutex_init_recursive(&small_id_mutex);
355 mono_counters_register ("Hazardous pointers", MONO_COUNTER_JIT | MONO_COUNTER_INT, &hazardous_pointer_count);
357 for (i = 0; i < HAZARD_TABLE_OVERFLOW; ++i) {
358 int small_id = mono_thread_small_id_alloc ();
359 g_assert (small_id == i);
364 mono_thread_smr_cleanup (void)
366 mono_thread_hazardous_try_free_all ();
368 mono_lock_free_array_queue_cleanup (&delayed_free_queue);
370 /*FIXME, can't we release the small id table here?*/