/*
 * hazard-pointer.c: Hazard pointer related code.
 *
 * (C) Copyright 2011 Novell, Inc
 */

#include <mono/metadata/class-internals.h>
#include <mono/utils/hazard-pointer.h>
#include <mono/utils/mono-membar.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/monobitset.h>
#include <mono/utils/mono-threads.h>
#include <mono/io-layer/io-layer.h>

typedef struct {
	gpointer p;
	MonoHazardousFreeFunc free_func;
} DelayedFreeItem;

/* Entry states. DFE_STATE_FREE must be zero: chunks are allocated as
   zeroed pages, so fresh entries start out free. */
enum {
	DFE_STATE_FREE,
	DFE_STATE_USED,
	DFE_STATE_BUSY
};

typedef struct {
	volatile gint32 state;
	DelayedFreeItem item;
} DelayedFreeEntry;

typedef struct _DelayedFreeChunk DelayedFreeChunk;
struct _DelayedFreeChunk {
	DelayedFreeChunk *next;
	int num_entries;
	DelayedFreeEntry entries [MONO_ZERO_LEN_ARRAY];
};

/* The hazard table */

#if MONO_SMALL_CONFIG
#define HAZARD_TABLE_MAX_SIZE	256
#else
#define HAZARD_TABLE_MAX_SIZE	16384 /* There cannot be more threads than this number. */
#endif

static volatile int hazard_table_size = 0;
static MonoThreadHazardPointers * volatile hazard_table = NULL;

/* The table where we keep pointers to blocks to be freed but that
   have to wait because they're guarded by a hazard pointer. */
static volatile gint32 num_used_delayed_free_entries;
static DelayedFreeChunk *delayed_free_chunk_list;

/* The table for small ID assignment */
static CRITICAL_SECTION small_id_mutex;
static int small_id_next;
static int highest_small_id = -1;
static MonoBitSet *small_id_table;

/*
 * Delayed free table
 *
 * The table is a linked list of arrays (chunks). Chunks are never
 * removed from the list, only added to the end, in a lock-free manner.
 *
 * Adding or removing an entry in the table is only possible at the
 * end. To do so, the thread first has to increment or decrement
 * num_used_delayed_free_entries. The entry thus added or removed now
 * "belongs" to that thread. It first CASes the state to BUSY,
 * writes/reads the entry, and then sets the state to USED or FREE.
 *
 * Note that it's possible that there is contention. Some thread will
 * always make progress, though.
 *
 * The simplest case of contention is one thread pushing and another
 * thread popping the same entry. The state is FREE at first, so the
 * pushing thread succeeds in setting it to BUSY. The popping thread
 * will only succeed with its CAS once the state is USED, which is the
 * case after the pushing thread has finished pushing.
 */

static DelayedFreeChunk*
alloc_delayed_free_chunk (void)
{
	int size = mono_pagesize ();
	int num_entries = (size - (sizeof (DelayedFreeChunk) - sizeof (DelayedFreeEntry) * MONO_ZERO_LEN_ARRAY)) / sizeof (DelayedFreeEntry);
	DelayedFreeChunk *chunk = mono_valloc (0, size, MONO_MMAP_READ | MONO_MMAP_WRITE);
	chunk->num_entries = num_entries;
	return chunk;
}

static void
free_delayed_free_chunk (DelayedFreeChunk *chunk)
{
	mono_vfree (chunk, mono_pagesize ());
}

static DelayedFreeEntry*
get_delayed_free_entry (int index)
{
	DelayedFreeChunk *chunk;

	g_assert (index >= 0);

	/* Lazily allocate the first chunk, racing benignly with other threads. */
	if (!delayed_free_chunk_list) {
		chunk = alloc_delayed_free_chunk ();
		mono_memory_write_barrier ();
		if (InterlockedCompareExchangePointer ((volatile gpointer *)&delayed_free_chunk_list, chunk, NULL) != NULL)
			free_delayed_free_chunk (chunk);
	}

	chunk = delayed_free_chunk_list;
	g_assert (chunk);

	/* Walk the chunk list, appending new chunks as needed, until we
	   reach the chunk that contains the index. */
	while (index >= chunk->num_entries) {
		DelayedFreeChunk *next = chunk->next;
		if (!next) {
			next = alloc_delayed_free_chunk ();
			mono_memory_write_barrier ();
			if (InterlockedCompareExchangePointer ((volatile gpointer *) &chunk->next, next, NULL) != NULL) {
				/* Another thread appended a chunk first - use that one. */
				free_delayed_free_chunk (next);
				next = chunk->next;
				g_assert (next);
			}
		}

		index -= chunk->num_entries;
		chunk = next;
	}

	return &chunk->entries [index];
}

static void
delayed_free_push (DelayedFreeItem item)
{
	int index = InterlockedIncrement (&num_used_delayed_free_entries) - 1;
	DelayedFreeEntry *entry = get_delayed_free_entry (index);

	/* Take ownership of the entry, spinning until any concurrent pop
	   has released it. */
	while (InterlockedCompareExchange (&entry->state, DFE_STATE_BUSY, DFE_STATE_FREE) != DFE_STATE_FREE)
		;

	mono_memory_write_barrier ();

	entry->item = item;

	mono_memory_write_barrier ();

	entry->state = DFE_STATE_USED;
}

static gboolean
delayed_free_pop (DelayedFreeItem *item)
{
	int index;
	DelayedFreeEntry *entry;

	/* Claim the topmost used slot by decrementing the counter. */
	do {
		index = num_used_delayed_free_entries;
		if (index == 0)
			return FALSE;
	} while (InterlockedCompareExchange (&num_used_delayed_free_entries, index - 1, index) != index);

	--index;

	entry = get_delayed_free_entry (index);

	/* Wait until any concurrent push has finished filling the entry. */
	while (InterlockedCompareExchange (&entry->state, DFE_STATE_BUSY, DFE_STATE_USED) != DFE_STATE_USED)
		;

	mono_memory_write_barrier ();

	*item = entry->item;

	mono_memory_write_barrier ();

	entry->state = DFE_STATE_FREE;

	return TRUE;
}

/*
 * Allocate a small thread id.
 *
 * FIXME: The biggest part of this function is very similar to
 * domain_id_alloc() in domain.c and should be merged.
 */
int
mono_thread_small_id_alloc (void)
{
	int i, id = -1;

	EnterCriticalSection (&small_id_mutex);

	if (!small_id_table)
		small_id_table = mono_bitset_new (1, 0);

	id = mono_bitset_find_first_unset (small_id_table, small_id_next);
	if (id == -1)
		id = mono_bitset_find_first_unset (small_id_table, -1);

	if (id == -1) {
		MonoBitSet *new_table;
		if (small_id_table->size * 2 >= (1 << 16))
			g_assert_not_reached ();
		new_table = mono_bitset_clone (small_id_table, small_id_table->size * 2);
		id = mono_bitset_find_first_unset (new_table, small_id_table->size - 1);

		mono_bitset_free (small_id_table);
		small_id_table = new_table;
	}

	g_assert (!mono_bitset_test_fast (small_id_table, id));
	mono_bitset_set_fast (small_id_table, id);

	small_id_next++;
	if (small_id_next >= small_id_table->size)
		small_id_next = 0;

	g_assert (id < HAZARD_TABLE_MAX_SIZE);
	if (id >= hazard_table_size) {
#if MONO_SMALL_CONFIG
		hazard_table = g_malloc0 (sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE);
		hazard_table_size = HAZARD_TABLE_MAX_SIZE;
#else
		gpointer page_addr;
		int pagesize = mono_pagesize ();
		int num_pages = (hazard_table_size * sizeof (MonoThreadHazardPointers) + pagesize - 1) / pagesize;

		if (hazard_table == NULL) {
			hazard_table = mono_valloc (NULL,
				sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE,
				MONO_MMAP_NONE);
		}

		g_assert (hazard_table != NULL);
		page_addr = (guint8*)hazard_table + num_pages * pagesize;

		/* Commit one more page of the reserved region. */
		mono_mprotect (page_addr, pagesize, MONO_MMAP_READ | MONO_MMAP_WRITE);

		++num_pages;
		hazard_table_size = num_pages * pagesize / sizeof (MonoThreadHazardPointers);
#endif
		g_assert (id < hazard_table_size);
		for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
			hazard_table [id].hazard_pointers [i] = NULL;
	}

	if (id > highest_small_id) {
		highest_small_id = id;
		mono_memory_write_barrier ();
	}

	LeaveCriticalSection (&small_id_mutex);

	return id;
}

void
mono_thread_small_id_free (int id)
{
	/* MonoBitSet operations are not atomic. */
	EnterCriticalSection (&small_id_mutex);

	g_assert (id >= 0 && id < small_id_table->size);
	g_assert (mono_bitset_test_fast (small_id_table, id));
	mono_bitset_clear_fast (small_id_table, id);

	LeaveCriticalSection (&small_id_mutex);
}

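/*
 * Illustrative sketch, not part of this file's original code: a thread
 * needs a small id before it can publish hazard pointers.  The
 * my_thread_attach()/my_thread_detach() names below are hypothetical
 * stand-ins for whatever per-thread setup the caller performs; in Mono
 * this is done by the threads machinery, not by callers directly.
 */
#if 0
static void
my_thread_attach (MonoThreadInfo *info)
{
	/* Reserve a slot in hazard_table for this thread. */
	info->small_id = mono_thread_small_id_alloc ();
}

static void
my_thread_detach (MonoThreadInfo *info)
{
	/* Release the slot so another thread can reuse it. */
	mono_thread_small_id_free (info->small_id);
	info->small_id = -1;
}
#endif
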
static gboolean
is_pointer_hazardous (gpointer p)
{
	int i;
	int highest = highest_small_id;

	g_assert (highest < hazard_table_size);

	/* Scan the hazard pointer slots of every registered thread. */
	for (i = 0; i <= highest; ++i) {
		if (hazard_table [i].hazard_pointers [0] == p
				|| hazard_table [i].hazard_pointers [1] == p)
			return TRUE;
	}

	return FALSE;
}

MonoThreadHazardPointers*
mono_hazard_pointer_get (void)
{
	MonoThreadInfo *current_thread = mono_thread_info_current ();

	if (!(current_thread && current_thread->small_id >= 0)) {
		static MonoThreadHazardPointers emerg_hazard_table;
		g_warning ("Thread %p may have been prematurely finalized", current_thread);
		return &emerg_hazard_table;
	}

	return &hazard_table [current_thread->small_id];
}

MonoThreadHazardPointers*
mono_hazard_pointer_get_by_id (int small_id)
{
	g_assert (small_id >= 0 && small_id <= highest_small_id);
	return &hazard_table [small_id];
}

/* Can be called with hp==NULL, in which case it acts as an ordinary
   pointer fetch. It's used that way indirectly from
   mono_jit_info_table_add(), which doesn't have to care about hazards
   because it holds the respective domain lock. */
gpointer
get_hazardous_pointer (gpointer volatile *pp, MonoThreadHazardPointers *hp, int hazard_index)
{
	gpointer p;

	for (;;) {
		/* Get the pointer */
		p = *pp;
		/* If we don't have hazard pointers just return the
		   pointer. */
		if (!hp)
			return p;

		/* Make it hazardous */
		mono_hazard_pointer_set (hp, hazard_index, p);
		/* Check that it's still the same. If not, try
		   again. */
		if (*pp != p) {
			mono_hazard_pointer_clear (hp, hazard_index);
			continue;
		}

		break;
	}

	return p;
}

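/*
 * Illustrative sketch, an assumption rather than code from this file:
 * the typical reader-side pattern.  "MyNode" and "shared_list_head" are
 * hypothetical names; the point is that the pointer stays protected
 * between get_hazardous_pointer() and mono_hazard_pointer_clear().
 */
#if 0
typedef struct _MyNode MyNode;
struct _MyNode {
	MyNode *next;
	int value;
};

static MyNode * volatile shared_list_head;

static int
read_head_value (void)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MyNode *node;
	int value = -1;

	/* Publish a hazard pointer to the head so it cannot be freed
	   underneath us. */
	node = get_hazardous_pointer ((gpointer volatile *)&shared_list_head, hp, 0);
	if (node)
		value = node->value;

	/* Done with the node - drop the protection. */
	mono_hazard_pointer_clear (hp, 0);

	return value;
}
#endif
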
static gboolean
try_free_delayed_free_item (void)
{
	DelayedFreeItem item;
	gboolean popped = delayed_free_pop (&item);

	if (!popped)
		return FALSE;

	if (is_pointer_hazardous (item.p)) {
		/* Still hazardous - put it back and report no progress. */
		delayed_free_push (item);
		return FALSE;
	}

	item.free_func (item.p);

	return TRUE;
}

void
mono_thread_hazardous_free_or_queue (gpointer p, MonoHazardousFreeFunc free_func)
{
	int i;

	/* First try to free a few entries in the delayed free
	   table. */
	for (i = 0; i < 3; ++i)
		try_free_delayed_free_item ();

	/* Now see if the pointer we're freeing is hazardous. If it
	   isn't, free it. Otherwise put it in the delay list. */
	if (is_pointer_hazardous (p)) {
		DelayedFreeItem item = { p, free_func };

		++mono_stats.hazardous_pointer_count;

		delayed_free_push (item);
	} else {
		free_func (p);
	}
}

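/*
 * Illustrative sketch, an assumption rather than code from this file:
 * the writer side.  After a node has been unlinked from the shared
 * structure it is handed to mono_thread_hazardous_free_or_queue()
 * instead of being freed directly, so readers that still hold a hazard
 * pointer to it stay safe.  "MyNode" and "shared_list_head" are the
 * same hypothetical names as in the reader sketch above.
 */
#if 0
static void
pop_and_retire_head (void)
{
	MyNode *node = InterlockedExchangePointer ((volatile gpointer *)&shared_list_head, NULL);

	if (!node)
		return;

	/* The node is no longer reachable from the shared structure, but
	   a reader may still hold a hazard pointer to it. Free it now if
	   that is safe, otherwise queue it for later. */
	mono_thread_hazardous_free_or_queue (node, g_free);
}
#endif
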
void
mono_thread_hazardous_try_free_all (void)
{
	while (try_free_delayed_free_item ())
		;
}

void
mono_thread_smr_init (void)
{
	InitializeCriticalSection (&small_id_mutex);
}

void
mono_thread_smr_cleanup (void)
{
	DelayedFreeChunk *chunk;

	mono_thread_hazardous_try_free_all ();

	chunk = delayed_free_chunk_list;
	delayed_free_chunk_list = NULL;
	while (chunk) {
		DelayedFreeChunk *next = chunk->next;
		free_delayed_free_chunk (chunk);
		chunk = next;
	}

	/* FIXME: can't we release the small id table here? */
}

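/*
 * Illustrative sketch, an assumption rather than code from this file:
 * the overall lifecycle of the SMR machinery as a caller sees it.
 */
#if 0
static void
smr_lifecycle_example (void)
{
	/* Once, at startup, before any hazardous frees happen. */
	mono_thread_smr_init ();

	/* During normal operation, threads retire memory through
	   mono_thread_hazardous_free_or_queue() and can opportunistically
	   drain the delayed-free table. */
	mono_thread_hazardous_try_free_all ();

	/* At shutdown, after all other threads are gone: drain the queue
	   and release the chunk list. */
	mono_thread_smr_cleanup ();
}
#endif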