/*
 * hazard-pointer.c: Hazard pointer related code.
 *
 * (C) Copyright 2011 Novell, Inc
 */
9 #include <mono/metadata/class-internals.h>
10 #include <mono/utils/hazard-pointer.h>
11 #include <mono/utils/mono-membar.h>
12 #include <mono/utils/mono-mmap.h>
13 #include <mono/utils/monobitset.h>
14 #include <mono/utils/mono-threads.h>
15 #include <mono/utils/lock-free-array-queue.h>
16 #include <mono/io-layer/io-layer.h>
20 MonoHazardousFreeFunc free_func;
23 /* The hazard table */
25 #define HAZARD_TABLE_MAX_SIZE 256
27 #define HAZARD_TABLE_MAX_SIZE 16384 /* There cannot be more threads than this number. */
30 static volatile int hazard_table_size = 0;
31 static MonoThreadHazardPointers * volatile hazard_table = NULL;
33 /* The table where we keep pointers to blocks to be freed but that
34 have to wait because they're guarded by a hazard pointer. */
35 static MonoLockFreeArrayQueue delayed_free_queue = MONO_LOCK_FREE_ARRAY_QUEUE_INIT (sizeof (DelayedFreeItem));
37 /* The table for small ID assignment */
38 static CRITICAL_SECTION small_id_mutex;
39 static int small_id_next;
40 static int highest_small_id = -1;
41 static MonoBitSet *small_id_table;
44 * Allocate a small thread id.
46 * FIXME: The biggest part of this function is very similar to
47 * domain_id_alloc() in domain.c and should be merged.
50 mono_thread_small_id_alloc (void)
54 EnterCriticalSection (&small_id_mutex);
57 small_id_table = mono_bitset_new (1, 0);
59 id = mono_bitset_find_first_unset (small_id_table, small_id_next);
61 id = mono_bitset_find_first_unset (small_id_table, -1);
64 MonoBitSet *new_table;
65 if (small_id_table->size * 2 >= (1 << 16))
66 g_assert_not_reached ();
67 new_table = mono_bitset_clone (small_id_table, small_id_table->size * 2);
68 id = mono_bitset_find_first_unset (new_table, small_id_table->size - 1);
70 mono_bitset_free (small_id_table);
71 small_id_table = new_table;
74 g_assert (!mono_bitset_test_fast (small_id_table, id));
75 mono_bitset_set_fast (small_id_table, id);
78 if (small_id_next >= small_id_table->size)
81 g_assert (id < HAZARD_TABLE_MAX_SIZE);
82 if (id >= hazard_table_size) {
84 hazard_table = g_malloc0 (sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE);
85 hazard_table_size = HAZARD_TABLE_MAX_SIZE;
88 int pagesize = mono_pagesize ();
89 int num_pages = (hazard_table_size * sizeof (MonoThreadHazardPointers) + pagesize - 1) / pagesize;
91 if (hazard_table == NULL) {
92 hazard_table = mono_valloc (NULL,
93 sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE,
97 g_assert (hazard_table != NULL);
98 page_addr = (guint8*)hazard_table + num_pages * pagesize;
100 mono_mprotect (page_addr, pagesize, MONO_MMAP_READ | MONO_MMAP_WRITE);
103 hazard_table_size = num_pages * pagesize / sizeof (MonoThreadHazardPointers);
106 g_assert (id < hazard_table_size);
107 for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
108 hazard_table [id].hazard_pointers [i] = NULL;
111 if (id > highest_small_id) {
112 highest_small_id = id;
113 mono_memory_write_barrier ();
116 LeaveCriticalSection (&small_id_mutex);
122 mono_thread_small_id_free (int id)
124 /* MonoBitSet operations are not atomic. */
125 EnterCriticalSection (&small_id_mutex);
127 g_assert (id >= 0 && id < small_id_table->size);
128 g_assert (mono_bitset_test_fast (small_id_table, id));
129 mono_bitset_clear_fast (small_id_table, id);
131 LeaveCriticalSection (&small_id_mutex);
135 is_pointer_hazardous (gpointer p)
138 int highest = highest_small_id;
140 g_assert (highest < hazard_table_size);
142 for (i = 0; i <= highest; ++i) {
143 for (j = 0; j < HAZARD_POINTER_COUNT; ++j) {
144 if (hazard_table [i].hazard_pointers [j] == p)
152 MonoThreadHazardPointers*
153 mono_hazard_pointer_get (void)
155 int small_id = mono_thread_info_get_small_id ();
158 static MonoThreadHazardPointers emerg_hazard_table;
159 g_warning ("Thread %p may have been prematurely finalized", (gpointer)mono_native_thread_id_get ());
160 return &emerg_hazard_table;
163 return &hazard_table [small_id];
166 /* Can be called with hp==NULL, in which case it acts as an ordinary
167 pointer fetch. It's used that way indirectly from
168 mono_jit_info_table_add(), which doesn't have to care about hazards
169 because it holds the respective domain lock. */
171 get_hazardous_pointer (gpointer volatile *pp, MonoThreadHazardPointers *hp, int hazard_index)
176 /* Get the pointer */
178 /* If we don't have hazard pointers just return the
182 /* Make it hazardous */
183 mono_hazard_pointer_set (hp, hazard_index, p);
184 /* Check that it's still the same. If not, try
187 mono_hazard_pointer_clear (hp, hazard_index);
197 try_free_delayed_free_item (void)
199 DelayedFreeItem item;
200 gboolean popped = mono_lock_free_array_queue_pop (&delayed_free_queue, &item);
205 if (is_pointer_hazardous (item.p)) {
206 mono_lock_free_array_queue_push (&delayed_free_queue, &item);
210 item.free_func (item.p);
216 mono_thread_hazardous_free_or_queue (gpointer p, MonoHazardousFreeFunc free_func)
220 /* First try to free a few entries in the delayed free
222 for (i = 0; i < 3; ++i)
223 try_free_delayed_free_item ();
225 /* Now see if the pointer we're freeing is hazardous. If it
226 isn't, free it. Otherwise put it in the delay list. */
227 if (is_pointer_hazardous (p)) {
228 DelayedFreeItem item = { p, free_func };
230 ++mono_stats.hazardous_pointer_count;
232 mono_lock_free_array_queue_push (&delayed_free_queue, &item);
/*
 * Drain the delayed-free queue, freeing every item that is no longer
 * hazardous.  Stops as soon as an attempt fails (queue empty or the
 * popped item is still guarded).
 */
void
mono_thread_hazardous_try_free_all (void)
{
	while (try_free_delayed_free_item ())
		;
}
246 mono_thread_smr_init (void)
248 InitializeCriticalSection(&small_id_mutex);
252 mono_thread_smr_cleanup (void)
254 mono_thread_hazardous_try_free_all ();
256 mono_lock_free_array_queue_cleanup (&delayed_free_queue);
258 /*FIXME, can't we release the small id table here?*/