/*
 * Copyright 2001-2003 Ximian, Inc
 * Copyright 2003-2010 Novell, Inc.
 * Copyright (C) 2012 Xamarin Inc
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
17 #include "mono/sgen/sgen-gc.h"
18 #include "mono/sgen/sgen-pinning.h"
19 #include "mono/sgen/sgen-protocol.h"
20 #include "mono/sgen/sgen-pointer-queue.h"
21 #include "mono/sgen/sgen-client.h"
23 static SgenPointerQueue pin_queue;
24 static size_t last_num_pinned = 0;
26 * While we hold the pin_queue_mutex, all objects in pin_queue_objs will
27 * stay pinned, which means they can't move, therefore they can be scanned.
29 static SgenPointerQueue pin_queue_objs;
30 static mono_mutex_t pin_queue_mutex;
32 #define PIN_HASH_SIZE 1024
33 static void *pin_hash_filter [PIN_HASH_SIZE];
36 sgen_pinning_init (void)
38 mono_os_mutex_init (&pin_queue_mutex);
42 sgen_init_pinning (void)
44 memset (pin_hash_filter, 0, sizeof (pin_hash_filter));
45 pin_queue.mem_type = INTERNAL_MEM_PIN_QUEUE;
49 sgen_init_pinning_for_conc (void)
51 mono_os_mutex_lock (&pin_queue_mutex);
52 sgen_pointer_queue_clear (&pin_queue_objs);
56 sgen_finish_pinning (void)
58 last_num_pinned = pin_queue.next_slot;
59 sgen_pointer_queue_clear (&pin_queue);
63 sgen_finish_pinning_for_conc (void)
65 mono_os_mutex_unlock (&pin_queue_mutex);
69 sgen_pinning_register_pinned_in_nursery (GCObject *obj)
71 sgen_pointer_queue_add (&pin_queue_objs, obj);
75 sgen_scan_pin_queue_objects (ScanCopyContext ctx)
78 ScanObjectFunc scan_func = ctx.ops->scan_object;
80 mono_os_mutex_lock (&pin_queue_mutex);
81 for (i = 0; i < pin_queue_objs.next_slot; ++i) {
82 GCObject *obj = (GCObject *)pin_queue_objs.data [i];
83 scan_func (obj, sgen_obj_get_descriptor_safe (obj), ctx.queue);
85 mono_os_mutex_unlock (&pin_queue_mutex);
89 sgen_pin_stage_ptr (void *ptr)
91 /*very simple multiplicative hash function, tons better than simple and'ng */
92 int hash_idx = ((mword)ptr * 1737350767) & (PIN_HASH_SIZE - 1);
93 if (pin_hash_filter [hash_idx] == ptr)
96 pin_hash_filter [hash_idx] = ptr;
98 sgen_pointer_queue_add (&pin_queue, ptr);
102 sgen_find_optimized_pin_queue_area (void *start, void *end, size_t *first_out, size_t *last_out)
104 size_t first = sgen_pointer_queue_search (&pin_queue, start);
105 size_t last = sgen_pointer_queue_search (&pin_queue, end);
106 SGEN_ASSERT (0, last == pin_queue.next_slot || pin_queue.data [last] >= end, "Pin queue search gone awry");
109 return first != last;
113 sgen_pinning_get_entry (size_t index)
115 SGEN_ASSERT (0, index <= pin_queue.next_slot, "Pin queue entry out of range");
116 return &pin_queue.data [index];
120 sgen_find_section_pin_queue_start_end (GCMemSection *section)
122 SGEN_LOG (6, "Pinning from section %p (%p-%p)", section, section->data, section->end_data);
124 sgen_find_optimized_pin_queue_area (section->data, section->end_data,
125 §ion->pin_queue_first_entry, §ion->pin_queue_last_entry);
127 SGEN_LOG (6, "Found %zd pinning addresses in section %p",
128 section->pin_queue_last_entry - section->pin_queue_first_entry, section);
131 /*This will setup the given section for the while pin queue. */
133 sgen_pinning_setup_section (GCMemSection *section)
135 section->pin_queue_first_entry = 0;
136 section->pin_queue_last_entry = pin_queue.next_slot;
140 sgen_pinning_trim_queue_to_section (GCMemSection *section)
142 SGEN_ASSERT (0, section->pin_queue_first_entry == 0, "Pin queue trimming assumes the whole pin queue is used by the nursery");
143 pin_queue.next_slot = section->pin_queue_last_entry;
147 * This is called when we've run out of memory during a major collection.
149 * After collecting potential pin entries and sorting the array, this is what it looks like:
151 * +--------------------+---------------------------------------------+--------------------+
152 * | major heap entries | nursery entries | major heap entries |
153 * +--------------------+---------------------------------------------+--------------------+
155 * Of course there might not be major heap entries before and/or after the nursery entries,
156 * depending on where the major heap sections are in the address space, and whether there
157 * were any potential pointers there.
159 * When we pin nursery objects, we compact the nursery part of the pin array, which leaves
160 * discarded entries after the ones that actually pointed to nursery objects:
162 * +--------------------+-----------------+---------------------------+--------------------+
163 * | major heap entries | nursery entries | discarded nursery entries | major heap entries |
164 * +--------------------+-----------------+---------------------------+--------------------+
166 * When, due to being out of memory, we late pin more objects, the pin array looks like
169 * +--------------------+-----------------+---------------------------+--------------------+--------------+
170 * | major heap entries | nursery entries | discarded nursery entries | major heap entries | late entries |
171 * +--------------------+-----------------+---------------------------+--------------------+--------------+
173 * This function gets rid of the discarded nursery entries by nulling them out. Note that
174 * we can late pin objects not only in the nursery but also in the major heap, which happens
175 * when evacuation fails.
178 sgen_pin_queue_clear_discarded_entries (GCMemSection *section, size_t max_pin_slot)
180 void **start = sgen_pinning_get_entry (section->pin_queue_last_entry);
181 void **end = sgen_pinning_get_entry (max_pin_slot);
184 for (; start < end; ++start) {
186 if ((char*)addr < section->data || (char*)addr > section->end_data)
192 /* reduce the info in the pin queue, removing duplicate pointers and sorting them */
194 sgen_optimize_pin_queue (void)
196 sgen_pointer_queue_sort_uniq (&pin_queue);
200 sgen_get_pinned_count (void)
202 return pin_queue.next_slot;
206 sgen_dump_pin_queue (void)
210 for (i = 0; i < last_num_pinned; ++i) {
211 GCObject *ptr = (GCObject *)pin_queue.data [i];
212 SGEN_LOG (3, "Bastard pinning obj %p (%s), size: %zd", ptr, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (ptr)), sgen_safe_object_get_size (ptr));
216 typedef struct _CementHashEntry CementHashEntry;
217 struct _CementHashEntry {
220 gboolean forced; /* if it should stay cemented after the finishing pause */
223 static CementHashEntry cement_hash [SGEN_CEMENT_HASH_SIZE];
225 static gboolean cement_enabled = TRUE;
228 sgen_cement_init (gboolean enabled)
230 cement_enabled = enabled;
234 sgen_cement_reset (void)
237 for (i = 0; i < SGEN_CEMENT_HASH_SIZE; i++) {
238 if (cement_hash [i].forced) {
239 cement_hash [i].forced = FALSE;
241 cement_hash [i].obj = NULL;
242 cement_hash [i].count = 0;
245 binary_protocol_cement_reset ();
250 * The pin_queue should be full and sorted, without entries from the cemented
251 * objects. We traverse the cement hash and check if each object is pinned in
252 * the pin_queue (the pin_queue contains entries between obj and obj+obj_len)
255 sgen_cement_force_pinned (void)
262 for (i = 0; i < SGEN_CEMENT_HASH_SIZE; i++) {
263 GCObject *obj = cement_hash [i].obj;
267 if (cement_hash [i].count < SGEN_CEMENT_THRESHOLD)
269 SGEN_ASSERT (0, !cement_hash [i].forced, "Why do we have a forced cemented object before forcing ?");
271 /* Returns the index of the target or of the first element greater than it */
272 index = sgen_pointer_queue_search (&pin_queue, obj);
273 if (index == pin_queue.next_slot)
275 SGEN_ASSERT (0, pin_queue.data [index] >= (gpointer)obj, "Binary search should return a pointer greater than the search target");
276 if (pin_queue.data [index] < (gpointer)((char*)obj + sgen_safe_object_get_size (obj)))
277 cement_hash [i].forced = TRUE;
282 sgen_cement_is_forced (GCObject *obj)
284 guint hv = sgen_aligned_addr_hash (obj);
285 int i = SGEN_CEMENT_HASH (hv);
287 SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Looking up cementing for non-nursery objects makes no sense");
292 if (!cement_hash [i].obj)
294 if (cement_hash [i].obj != obj)
297 return cement_hash [i].forced;
301 sgen_cement_lookup (GCObject *obj)
303 guint hv = sgen_aligned_addr_hash (obj);
304 int i = SGEN_CEMENT_HASH (hv);
306 SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Looking up cementing for non-nursery objects makes no sense");
311 if (!cement_hash [i].obj)
313 if (cement_hash [i].obj != obj)
316 return cement_hash [i].count >= SGEN_CEMENT_THRESHOLD;
320 sgen_cement_lookup_or_register (GCObject *obj)
324 CementHashEntry *hash = cement_hash;
329 hv = sgen_aligned_addr_hash (obj);
330 i = SGEN_CEMENT_HASH (hv);
332 SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Can only cement pointers to nursery objects");
336 old_obj = InterlockedCompareExchangePointer ((gpointer*)&hash [i].obj, obj, NULL);
337 /* Check if the slot was occupied by some other object */
338 if (old_obj != NULL && old_obj != obj)
340 } else if (hash [i].obj != obj) {
344 if (hash [i].count >= SGEN_CEMENT_THRESHOLD)
347 if (InterlockedIncrement ((gint32*)&hash [i].count) == SGEN_CEMENT_THRESHOLD) {
348 SGEN_ASSERT (9, sgen_get_current_collection_generation () >= 0, "We can only cement objects when we're in a collection pause.");
349 SGEN_ASSERT (9, SGEN_OBJECT_IS_PINNED (obj), "Can only cement pinned objects");
350 SGEN_CEMENT_OBJECT (obj);
352 binary_protocol_cement (obj, (gpointer)SGEN_LOAD_VTABLE (obj),
353 (int)sgen_safe_object_get_size (obj));
360 pin_from_hash (CementHashEntry *hash, gboolean has_been_reset)
363 for (i = 0; i < SGEN_CEMENT_HASH_SIZE; ++i) {
368 SGEN_ASSERT (5, hash [i].count >= SGEN_CEMENT_THRESHOLD, "Cementing hash inconsistent");
370 sgen_pin_stage_ptr (hash [i].obj);
371 binary_protocol_cement_stage (hash [i].obj);
372 /* FIXME: do pin stats if enabled */
374 SGEN_CEMENT_OBJECT (hash [i].obj);
379 sgen_pin_cemented_objects (void)
381 pin_from_hash (cement_hash, TRUE);
385 sgen_cement_clear_below_threshold (void)
388 for (i = 0; i < SGEN_CEMENT_HASH_SIZE; ++i) {
389 if (cement_hash [i].count < SGEN_CEMENT_THRESHOLD) {
390 cement_hash [i].obj = NULL;
391 cement_hash [i].count = 0;
396 #endif /* HAVE_SGEN_GC */