3 * Handle to object in native code
6 * - Ludovic Henry <ludovic@xamarin.com>
7 * - Aleksey Klieger <aleksey.klieger@xamarin.com>
8 * - Rodrigo Kumpera <kumpera@xamarin.com>
10 * Copyright 2016 Dot net foundation.
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
17 #include <mono/metadata/handle.h>
18 #include <mono/metadata/object-internals.h>
19 #include <mono/metadata/gc-internals.h>
20 #include <mono/utils/atomic.h>
21 #include <mono/utils/mono-lazy-init.h>
22 #include <mono/utils/mono-threads.h>
23 /* TODO (missing pieces)
28 mix/max/avg size of stack marks
31 Actually do something in mono_handle_verify
33 Shrink the handles stack in mono_handle_stack_scan
34 Properly report it to the profiler.
35 Add a boehm implementation
37 TODO (things to explore):
39 There's no convenient way to wrap the object allocation function.
41 MonoCultureInfoHandle culture = MONO_HANDLE_NEW (MonoCultureInfo, mono_object_new_checked (domain, klass, &error));
43 Maybe what we need is a round of cleanup around all exposed types in the runtime to unify all helpers under the same hoof.
44 Combine: MonoDefaults, GENERATE_GET_CLASS_WITH_CACHE, TYPED_HANDLE_DECL and friends.
45 This would solve the age old issue of making it clear which types are optional and tell that to the linker.
46 We could then generate neat type safe wrappers.
52 * If we are running with cooperative GC, all the handle stack
53 * manipulation will complete before a GC thread scans the handle
54 * stack. If we are using async suspend, however, a thread may be
55 * trying to allocate a new handle, or unwind the handle stack when
56 * the GC stops the world.
58 * In particular, we need to ensure that if the mutator thread is
59 * suspended while manipulating the handle stack, the stack is in a
60 * good enough state to be scanned. In particular, the size of each
61 * chunk should be updated before an object is written into the
62 * handle, and chunks to be scanned (between bottom and top) should
65 * Note that the handle stack is scanned PRECISELY (see
66 * sgen_client_scan_thread_data ()). That means there should not be
67 * stale objects scanned. So when we manipulate the size of a chunk,
 * we must ensure that the newly scannable slot is either null or
69 * points to a valid value.
72 const MonoObjectHandle mono_null_value_handle = NULL;
74 #define THIS_IS_AN_OK_NUMBER_OF_HANDLES 100
77 HANDLE_CHUNK_PTR_OBJ = 0x0, /* chunk element points to beginning of a managed object */
78 HANDLE_CHUNK_PTR_INTERIOR = 0x1, /* chunk element points into the middle of a managed object */
79 HANDLE_CHUNK_PTR_MASK = 0x1
82 /* number of bits in each word of the interior pointer bitmap */
83 #define INTERIOR_HANDLE_BITMAP_BITS_PER_WORD (sizeof(guint32) << 3)
86 bitset_bits_test (guint32 *bitmaps, int idx)
88 int w = idx / INTERIOR_HANDLE_BITMAP_BITS_PER_WORD;
89 int b = idx % INTERIOR_HANDLE_BITMAP_BITS_PER_WORD;
90 guint32 bitmap = bitmaps [w];
91 guint32 mask = 1u << b;
92 return ((bitmap & mask) != 0);
96 bitset_bits_set (guint32 *bitmaps, int idx)
98 int w = idx / INTERIOR_HANDLE_BITMAP_BITS_PER_WORD;
99 int b = idx % INTERIOR_HANDLE_BITMAP_BITS_PER_WORD;
100 guint32 *bitmap = &bitmaps [w];
101 guint32 mask = 1u << b;
105 bitset_bits_clear (guint32 *bitmaps, int idx)
107 int w = idx / INTERIOR_HANDLE_BITMAP_BITS_PER_WORD;
108 int b = idx % INTERIOR_HANDLE_BITMAP_BITS_PER_WORD;
109 guint32 *bitmap = &bitmaps [w];
110 guint32 mask = ~(1u << b);
115 chunk_element_objslot_init (HandleChunk *chunk, int idx, gboolean interior)
118 bitset_bits_set (chunk->interior_bitmap, idx);
120 bitset_bits_clear (chunk->interior_bitmap, idx);
121 return &chunk->elems [idx].o;
124 static HandleChunkElem*
125 chunk_element (HandleChunk *chunk, int idx)
127 return &chunk->elems[idx];
131 chunk_element_kind (HandleChunk *chunk, int idx)
133 return bitset_bits_test (chunk->interior_bitmap, idx) ? HANDLE_CHUNK_PTR_INTERIOR : HANDLE_CHUNK_PTR_OBJ;
136 static HandleChunkElem*
137 handle_to_chunk_element (MonoObjectHandle o)
139 return (HandleChunkElem*)o;
142 /* Given a HandleChunkElem* search through the current handle stack to find its chunk and offset. */
144 chunk_element_to_chunk_idx (HandleStack *stack, HandleChunkElem *elem, int *out_idx)
146 HandleChunk *top = stack->top;
147 HandleChunk *cur = stack->bottom;
151 while (cur != NULL) {
152 HandleChunkElem *front = &cur->elems [0];
153 HandleChunkElem *back = &cur->elems [cur->size];
155 if (front <= elem && elem < back) {
156 *out_idx = (int)(elem - front);
161 break; /* didn't find it. */
/* Record the allocating function name in the element when owner tracking
 * is compiled in; otherwise a no-op.
 * Defect fixed: the two alternative definitions of each macro appeared
 * back-to-back without #else/#endif, which is a redefinition error. */
#ifdef MONO_HANDLE_TRACK_OWNER
#define SET_OWNER(chunk,idx) do { (chunk)->elems[(idx)].owner = owner; } while (0)
#else
#define SET_OWNER(chunk,idx) do { } while (0)
#endif

/* Record the native stack pointer at handle-allocation time when SP
 * tracking is compiled in; otherwise a no-op. */
#ifdef MONO_HANDLE_TRACK_SP
#define SET_SP(handles,chunk,idx) do { (chunk)->elems[(idx)].alloc_sp = handles->stackmark_sp; } while (0)
#else
#define SET_SP(handles,chunk,idx) do { } while (0)
#endif
#ifdef MONO_HANDLE_TRACK_SP
/*
 * Warn if the topmost live handle was allocated deeper in the native call
 * stack than the most recent stack mark — i.e. it escaped its
 * HANDLE_FUNCTION_ENTER/_RETURN pair and will never be reclaimed.
 * Only available when SP tracking is compiled in (it reads alloc_sp and
 * handles->stackmark_sp, which exist only under MONO_HANDLE_TRACK_SP).
 */
void
mono_handle_chunk_leak_check (HandleStack *handles) {
	if (handles->stackmark_sp) {
		/* walk back from the top to the topmost non-empty chunk */
		HandleChunk *c = handles->top;
		while (c && c->size <= 0 && c != handles->bottom)
			c = c->prev;
		if (c == NULL || c->size == 0)
			return;
		g_assert (c && c->size > 0);
		HandleChunkElem *e = chunk_element (c, c->size - 1);
		if (e->alloc_sp < handles->stackmark_sp) {
			/* If we get here, the topmost object on the handle stack was
			 * allocated from a function that is deeper in the call stack than
			 * the most recent HANDLE_FUNCTION_ENTER. That means it was
			 * probably not wrapped in a HANDLE_FUNCTION_ENTER/_RETURN pair
			 * and will never be reclaimed. */
			g_warning ("Handle %p (object = %p) (allocated from \"%s\") is leaking.\n", e, e->o,
#ifdef MONO_HANDLE_TRACK_OWNER
				   e->owner
#else
				   "<unknown owner>"
#endif
				);
		}
	}
}
#endif
211 #ifndef MONO_HANDLE_TRACK_OWNER
212 mono_handle_new (MonoObject *object)
214 mono_handle_new (MonoObject *object, const char *owner)
217 #ifndef MONO_HANDLE_TRACK_OWNER
218 return mono_handle_new_full (object, FALSE);
220 return mono_handle_new_full (object, FALSE, owner);
223 /* Actual handles implementation */
225 #ifndef MONO_HANDLE_TRACK_OWNER
226 mono_handle_new_full (gpointer rawptr, gboolean interior)
228 mono_handle_new_full (gpointer rawptr, gboolean interior, const char *owner)
231 MonoThreadInfo *info = mono_thread_info_current ();
232 HandleStack *handles = (HandleStack *)info->handle_stack;
233 HandleChunk *top = handles->top;
234 #ifdef MONO_HANDLE_TRACK_SP
235 mono_handle_chunk_leak_check (handles);
239 if (G_LIKELY (top->size < OBJECTS_PER_HANDLES_CHUNK)) {
241 gpointer* objslot = chunk_element_objslot_init (top, idx, interior);
242 /* can be interrupted anywhere here, so:
243 * 1. make sure the new slot is null
244 * 2. make the new slot scannable (increment size)
245 * 3. put a valid object in there
247 * (have to do 1 then 3 so that if we're interrupted
248 * between 1 and 2, the object is still live)
251 mono_memory_write_barrier ();
253 mono_memory_write_barrier ();
256 SET_SP (handles, top, idx);
259 if (G_LIKELY (top->next)) {
261 /* make sure size == 0 is visible to a GC thread before it sees the new top */
262 mono_memory_write_barrier ();
267 HandleChunk *new_chunk = g_new (HandleChunk, 1);
269 memset (new_chunk->interior_bitmap, 0, INTERIOR_HANDLE_BITMAP_WORDS);
270 new_chunk->prev = top;
271 new_chunk->next = NULL;
272 /* make sure size == 0 before new chunk is visible */
273 mono_memory_write_barrier ();
274 top->next = new_chunk;
275 handles->top = new_chunk;
282 mono_handle_stack_alloc (void)
284 HandleStack *stack = g_new (HandleStack, 1);
285 HandleChunk *chunk = g_new (HandleChunk, 1);
288 memset (chunk->interior_bitmap, 0, INTERIOR_HANDLE_BITMAP_WORDS);
289 chunk->prev = chunk->next = NULL;
290 mono_memory_write_barrier ();
291 stack->top = stack->bottom = chunk;
292 #ifdef MONO_HANDLE_TRACK_OWNER
293 stack->stackmark_sp = NULL;
299 mono_handle_stack_free (HandleStack *stack)
303 HandleChunk *c = stack->bottom;
304 stack->top = stack->bottom = NULL;
305 mono_memory_write_barrier ();
307 HandleChunk *next = c->next;
316 check_handle_stack_monotonic (HandleStack *stack)
318 /* check that every allocated handle in the current handle stack is at no higher in the native stack than its predecessors */
319 #ifdef MONO_HANDLE_TRACK_SP
320 HandleChunk *cur = stack->bottom;
321 HandleChunk *last = stack->top;
324 HandleChunkElem *prev = NULL;
325 gboolean monotonic = TRUE;
327 for (int i = 0;i < cur->size; ++i) {
328 HandleChunkElem *elem = chunk_element (cur, i);
329 if (prev && elem->alloc_sp < prev->alloc_sp) {
331 g_warning ("Handle %p (object %p) (allocated from \"%s\") is was allocated deeper in the call stack than its successor (allocated from \"%s\").", prev, prev->o,
332 #ifdef MONO_HANDLE_TRACK_OWNER
348 g_assert (monotonic);
353 mono_handle_stack_scan (HandleStack *stack, GcScanFunc func, gpointer gc_data, gboolean precise)
355 if (precise) /* run just once (per handle stack) per GC */
356 check_handle_stack_monotonic (stack);
358 We're called twice - on the imprecise pass we call func to pin the
359 objects where the handle points to its interior. On the precise
360 pass, we scan all the objects where the handles point to the start of
363 Note that if we're running, we know the world is stopped.
365 HandleChunk *cur = stack->bottom;
366 HandleChunk *last = stack->top;
372 /* assume that object pointers will be much more common than interior pointers.
373 * scan the object pointers by iterating over the chunk elements.
374 * scan the interior pointers by iterating over the bitmap bits.
377 for (int i = 0; i < cur->size; ++i) {
378 HandleChunkElem* elem = chunk_element (cur, i);
379 int kind = chunk_element_kind (cur, i);
380 gpointer* obj_slot = &elem->o;
381 if (kind == HANDLE_CHUNK_PTR_OBJ && *obj_slot != NULL)
382 func (obj_slot, gc_data);
386 for (int i = 0; i < INTERIOR_HANDLE_BITMAP_WORDS; ++i) {
387 elem_idx = i * INTERIOR_HANDLE_BITMAP_BITS_PER_WORD;
388 if (elem_idx >= cur->size)
390 /* no interior pointers in the range */
391 if (cur->interior_bitmap [i] == 0)
393 for (int j = 0; j < INTERIOR_HANDLE_BITMAP_BITS_PER_WORD && elem_idx < cur->size; ++j,++elem_idx) {
394 HandleChunkElem *elem = chunk_element (cur, elem_idx);
395 int kind = chunk_element_kind (cur, elem_idx);
396 gpointer *ptr_slot = &elem->o;
397 if (kind == HANDLE_CHUNK_PTR_INTERIOR && *ptr_slot != NULL)
398 func (ptr_slot, gc_data);
409 mono_stack_mark_record_size (MonoThreadInfo *info, HandleStackMark *stackmark, const char *func_name)
411 HandleStack *handles = (HandleStack *)info->handle_stack;
412 HandleChunk *cur = stackmark->chunk;
413 int size = -stackmark->size; //discard the starting point of the stack
416 if (cur == handles->top)
421 if (size > THIS_IS_AN_OK_NUMBER_OF_HANDLES)
422 g_warning ("%s USED %d handles\n", func_name, size);
426 * Pop the stack until @stackmark and make @value the top value.
428 * @return the new handle for what @value points to
431 mono_stack_mark_pop_value (MonoThreadInfo *info, HandleStackMark *stackmark, MonoRawHandle value)
433 MonoObject *obj = value ? *((MonoObject**)value) : NULL;
434 mono_stack_mark_pop (info, stackmark);
435 #ifndef MONO_HANDLE_TRACK_OWNER
436 return mono_handle_new (obj);
438 return mono_handle_new (obj, "<mono_stack_mark_pop_value>");
442 /* Temporary place for some of the handle enabled wrapper functions*/
445 mono_string_new_handle (MonoDomain *domain, const char *data, MonoError *error)
447 return MONO_HANDLE_NEW (MonoString, mono_string_new_checked (domain, data, error));
451 mono_array_new_handle (MonoDomain *domain, MonoClass *eclass, uintptr_t n, MonoError *error)
453 return MONO_HANDLE_NEW (MonoArray, mono_array_new_checked (domain, eclass, n, error));
457 mono_array_new_full_handle (MonoDomain *domain, MonoClass *array_class, uintptr_t *lengths, intptr_t *lower_bounds, MonoError *error)
459 return MONO_HANDLE_NEW (MonoArray, mono_array_new_full_checked (domain, array_class, lengths, lower_bounds, error));
#ifdef ENABLE_CHECKED_BUILD
/* Checked build helpers */

/* Validate RAW_HANDLE; currently a no-op (see the TODO at the top of the
 * file: "Actually do something in mono_handle_verify"). */
void
mono_handle_verify (MonoRawHandle raw_handle)
{
	/* TODO */
}
#endif /* ENABLE_CHECKED_BUILD */
472 mono_array_handle_length (MonoArrayHandle arr)
474 MONO_REQ_GC_UNSAFE_MODE;
476 return MONO_HANDLE_RAW (arr)->max_length;
480 mono_gchandle_from_handle (MonoObjectHandle handle, mono_bool pinned)
482 /* FIXME: chunk_element_to_chunk_idx does a linear search through the
483 * chunks and we only need it for the assert */
484 MonoThreadInfo *info = mono_thread_info_current ();
485 HandleStack *stack = (HandleStack*) info->handle_stack;
486 HandleChunkElem* elem = handle_to_chunk_element (handle);
488 HandleChunk *chunk = chunk_element_to_chunk_idx (stack, elem, &elem_idx);
489 /* gchandles cannot deal with interior pointers */
490 g_assert (chunk != NULL);
491 g_assert (chunk_element_kind (chunk, elem_idx) != HANDLE_CHUNK_PTR_INTERIOR);
492 return mono_gchandle_new (MONO_HANDLE_RAW (handle), pinned);
496 mono_gchandle_get_target_handle (uint32_t gchandle)
498 return MONO_HANDLE_NEW (MonoObject, mono_gchandle_get_target (gchandle));
502 mono_array_handle_pin_with_size (MonoArrayHandle handle, int size, uintptr_t idx, uint32_t *gchandle)
504 g_assert (gchandle != NULL);
505 *gchandle = mono_gchandle_from_handle (MONO_HANDLE_CAST(MonoObject,handle), TRUE);
506 MonoArray *raw = MONO_HANDLE_RAW (handle);
507 return mono_array_addr_with_size (raw, size, idx);
511 mono_array_handle_memcpy_refs (MonoArrayHandle dest, uintptr_t dest_idx, MonoArrayHandle src, uintptr_t src_idx, uintptr_t len)
513 mono_array_memcpy_refs (MONO_HANDLE_RAW (dest), dest_idx, MONO_HANDLE_RAW (src), src_idx, len);
517 mono_handle_stack_is_empty (HandleStack *stack)
519 return (stack->top == stack->bottom && stack->top->size == 0);