Move thread management from sgen to utils. Move smr from MonoInternalThread to thread...
[mono.git] / mono / utils / hazard-pointer.c
1 /*
2  * hazard-pointer.c: Hazard pointer related code.
3  *
4  * (C) Copyright 2011 Novell, Inc
5  */
6
7 #include <config.h>
8
9 #include <mono/metadata/class-internals.h>
10 #include <mono/utils/hazard-pointer.h>
11 #include <mono/utils/mono-membar.h>
12 #include <mono/utils/mono-mmap.h>
13 #include <mono/utils/monobitset.h>
14 #include <mono/utils/mono-threads.h>
15 #include <mono/io-layer/io-layer.h>
16
17 typedef struct {
18         gpointer p;
19         MonoHazardousFreeFunc free_func;
20 } DelayedFreeItem;
21
22 /* The hazard table */
23 #if MONO_SMALL_CONFIG
24 #define HAZARD_TABLE_MAX_SIZE   256
25 #else
26 #define HAZARD_TABLE_MAX_SIZE   16384 /* There cannot be more threads than this number. */
27 #endif
28
29 static volatile int hazard_table_size = 0;
30 static MonoThreadHazardPointers * volatile hazard_table = NULL;
31
32 /* The table where we keep pointers to blocks to be freed but that
33    have to wait because they're guarded by a hazard pointer. */
34 static CRITICAL_SECTION delayed_free_table_mutex;
35 static GArray *delayed_free_table = NULL;
36
37 /* The table for small ID assignment */
38 static CRITICAL_SECTION small_id_mutex;
39 static int small_id_next;
40 static int highest_small_id = -1;
41 static MonoBitSet *small_id_table;
42
43 /*
44  * Allocate a small thread id.
45  *
46  * FIXME: The biggest part of this function is very similar to
47  * domain_id_alloc() in domain.c and should be merged.
48  */
int
mono_thread_small_id_alloc (void)
{
	int i, id = -1;

	EnterCriticalSection (&small_id_mutex);

	/* The bitset tracks which small ids are in use; it is created
	   lazily with a single bit and doubled on demand below. */
	if (!small_id_table)
		small_id_table = mono_bitset_new (1, 0);

	/* Search from the rotating cursor first so recently freed ids
	   are not handed out again immediately; fall back to a scan
	   from the beginning if everything above the cursor is taken. */
	id = mono_bitset_find_first_unset (small_id_table, small_id_next);
	if (id == -1)
		id = mono_bitset_find_first_unset (small_id_table, -1);

	if (id == -1) {
		/* Bitset exhausted: double it.  Small ids are hard-capped
		   at 2^16 here (and at HAZARD_TABLE_MAX_SIZE below). */
		MonoBitSet *new_table;
		if (small_id_table->size * 2 >= (1 << 16))
			g_assert_not_reached ();
		new_table = mono_bitset_clone (small_id_table, small_id_table->size * 2);
		/* All old bits are set (both scans failed), so the first
		   free bit lies at or right after the old size. */
		id = mono_bitset_find_first_unset (new_table, small_id_table->size - 1);

		mono_bitset_free (small_id_table);
		small_id_table = new_table;
	}

	g_assert (!mono_bitset_test_fast (small_id_table, id));
	mono_bitset_set_fast (small_id_table, id);

	/* Advance the cursor, wrapping at the end of the bitset. */
	small_id_next++;
	if (small_id_next >= small_id_table->size)
		small_id_next = 0;

	g_assert (id < HAZARD_TABLE_MAX_SIZE);
	/* Grow the hazard table if it does not yet cover the new id. */
	if (id >= hazard_table_size) {
#if MONO_SMALL_CONFIG
		/* Small config: just allocate the whole (small) table. */
		hazard_table = g_malloc0 (sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE);
		hazard_table_size = HAZARD_TABLE_MAX_SIZE;
#else
		/* Reserve address space for the maximum table once, then
		   commit it one page at a time as ids grow.  The table is
		   scanned without the lock (see is_pointer_hazardous), so
		   already-committed entries must never move. */
		gpointer page_addr;
		int pagesize = mono_pagesize ();
		int num_pages = (hazard_table_size * sizeof (MonoThreadHazardPointers) + pagesize - 1) / pagesize;

		if (hazard_table == NULL) {
			hazard_table = mono_valloc (NULL,
				sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE,
				MONO_MMAP_NONE);
		}

		g_assert (hazard_table != NULL);
		page_addr = (guint8*)hazard_table + num_pages * pagesize;

		/* Commit the next page of the reservation. */
		mono_mprotect (page_addr, pagesize, MONO_MMAP_READ | MONO_MMAP_WRITE);

		++num_pages;
		hazard_table_size = num_pages * pagesize / sizeof (MonoThreadHazardPointers);

#endif
		g_assert (id < hazard_table_size);
		/* Clear the fresh slot before the id can be observed by
		   unlocked readers via highest_small_id. */
		for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
			hazard_table [id].hazard_pointers [i] = NULL;
	}

	if (id > highest_small_id) {
		highest_small_id = id;
		/* Make sure the slot initialization above is visible to
		   other threads before they see the new high-water mark. */
		mono_memory_write_barrier ();
	}

	LeaveCriticalSection (&small_id_mutex);

	return id;
}
120
121 void
122 mono_thread_small_id_free (int id)
123 {
124         /* MonoBitSet operations are not atomic. */
125         EnterCriticalSection (&small_id_mutex);
126
127         g_assert (id >= 0 && id < small_id_table->size);
128         g_assert (mono_bitset_test_fast (small_id_table, id));
129         mono_bitset_clear_fast (small_id_table, id);
130
131         LeaveCriticalSection (&small_id_mutex);
132 }
133
134 static gboolean
135 is_pointer_hazardous (gpointer p)
136 {
137         int i;
138         int highest = highest_small_id;
139
140         g_assert (highest < hazard_table_size);
141
142         for (i = 0; i <= highest; ++i) {
143                 if (hazard_table [i].hazard_pointers [0] == p
144                                 || hazard_table [i].hazard_pointers [1] == p)
145                         return TRUE;
146         }
147
148         return FALSE;
149 }
150
151 MonoThreadHazardPointers*
152 mono_hazard_pointer_get (void)
153 {
154         MonoThreadInfo *current_thread = mono_thread_info_current ();
155
156         if (!(current_thread && current_thread->small_id >= 0)) {
157                 static MonoThreadHazardPointers emerg_hazard_table;
158                 g_warning ("Thread %p may have been prematurely finalized", current_thread);
159                 return &emerg_hazard_table;
160         }
161
162         return &hazard_table [current_thread->small_id];
163 }
164
165 MonoThreadHazardPointers*
166 mono_hazard_pointer_get_by_id (int small_id)
167 {
168         g_assert (small_id >= 0 && small_id <= highest_small_id);
169         return &hazard_table [small_id];
170 }
171
172 /* Can be called with hp==NULL, in which case it acts as an ordinary
173    pointer fetch.  It's used that way indirectly from
174    mono_jit_info_table_add(), which doesn't have to care about hazards
175    because it holds the respective domain lock. */
176 gpointer
177 get_hazardous_pointer (gpointer volatile *pp, MonoThreadHazardPointers *hp, int hazard_index)
178 {
179         gpointer p;
180
181         for (;;) {
182                 /* Get the pointer */
183                 p = *pp;
184                 /* If we don't have hazard pointers just return the
185                    pointer. */
186                 if (!hp)
187                         return p;
188                 /* Make it hazardous */
189                 mono_hazard_pointer_set (hp, hazard_index, p);
190                 /* Check that it's still the same.  If not, try
191                    again. */
192                 if (*pp != p) {
193                         mono_hazard_pointer_clear (hp, hazard_index);
194                         continue;
195                 }
196                 break;
197         }
198
199         return p;
200 }
201
202 static void
203 try_free_delayed_free_item (int index)
204 {
205         if (delayed_free_table->len > index) {
206                 DelayedFreeItem item = { NULL, NULL };
207
208                 EnterCriticalSection (&delayed_free_table_mutex);
209                 /* We have to check the length again because another
210                    thread might have freed an item before we acquired
211                    the lock. */
212                 if (delayed_free_table->len > index) {
213                         item = g_array_index (delayed_free_table, DelayedFreeItem, index);
214
215                         if (!is_pointer_hazardous (item.p))
216                                 g_array_remove_index_fast (delayed_free_table, index);
217                         else
218                                 item.p = NULL;
219                 }
220                 LeaveCriticalSection (&delayed_free_table_mutex);
221
222                 if (item.p != NULL)
223                         item.free_func (item.p);
224         }
225 }
226
227 void
228 mono_thread_hazardous_free_or_queue (gpointer p, MonoHazardousFreeFunc free_func)
229 {
230         int i;
231
232         /* First try to free a few entries in the delayed free
233            table. */
234         for (i = 2; i >= 0; --i)
235                 try_free_delayed_free_item (i);
236
237         /* Now see if the pointer we're freeing is hazardous.  If it
238            isn't, free it.  Otherwise put it in the delay list. */
239         if (is_pointer_hazardous (p)) {
240                 DelayedFreeItem item = { p, free_func };
241
242                 ++mono_stats.hazardous_pointer_count;
243
244                 EnterCriticalSection (&delayed_free_table_mutex);
245                 g_array_append_val (delayed_free_table, item);
246                 LeaveCriticalSection (&delayed_free_table_mutex);
247         } else
248                 free_func (p);
249 }
250
251 void
252 mono_thread_hazardous_try_free_all (void)
253 {
254         int len;
255         int i;
256
257         if (!delayed_free_table)
258                 return;
259
260         len = delayed_free_table->len;
261
262         for (i = len - 1; i >= 0; --i)
263                 try_free_delayed_free_item (i);
264 }
265
266 void
267 mono_thread_smr_init (void)
268 {
269         InitializeCriticalSection(&delayed_free_table_mutex);
270         InitializeCriticalSection(&small_id_mutex);     
271
272         delayed_free_table = g_array_new (FALSE, FALSE, sizeof (DelayedFreeItem));
273 }
274
275 void
276 mono_thread_smr_cleanup (void)
277 {
278         mono_thread_hazardous_try_free_all ();
279
280         g_array_free (delayed_free_table, TRUE);
281         delayed_free_table = NULL;
282         /*FIXME, can't we release the small id table here?*/
283 }