2 * sgen-pinning.c: The pin queue.
4 * Copyright 2001-2003 Ximian, Inc
5 * Copyright 2003-2010 Novell, Inc.
6 * Copyright (C) 2012 Xamarin Inc
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Library General Public
10 * License 2.0 as published by the Free Software Foundation;
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Library General Public License for more details.
17 * You should have received a copy of the GNU Library General Public
18 * License 2.0 along with this library; if not, write to the Free
19 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 #include "metadata/sgen-gc.h"
26 #include "metadata/sgen-pinning.h"
27 #include "metadata/sgen-protocol.h"
28 #include "metadata/sgen-pointer-queue.h"
/* The pin queue: sorted list of addresses that must be pinned during a collection. */
static SgenPointerQueue pin_queue;
/* Number of pin queue entries at the end of the last pinning phase
 * (set by sgen_finish_pinning, read by sgen_dump_pin_queue). */
static size_t last_num_pinned = 0;

/* Size of the duplicate-suppression filter below; must be a power of two
 * because the hash index is masked with PIN_HASH_SIZE - 1. */
#define PIN_HASH_SIZE 1024
/* One-pointer-per-bucket filter used to cheaply skip pointers that were
 * just staged; collisions only cost a harmless duplicate entry. */
static void *pin_hash_filter [PIN_HASH_SIZE];
37 sgen_init_pinning (void)
39 memset (pin_hash_filter, 0, sizeof (pin_hash_filter));
40 pin_queue.mem_type = INTERNAL_MEM_PIN_QUEUE;
44 sgen_finish_pinning (void)
46 last_num_pinned = pin_queue.next_slot;
47 sgen_pointer_queue_clear (&pin_queue);
51 sgen_pin_stage_ptr (void *ptr)
53 /*very simple multiplicative hash function, tons better than simple and'ng */
54 int hash_idx = ((mword)ptr * 1737350767) & (PIN_HASH_SIZE - 1);
55 if (pin_hash_filter [hash_idx] == ptr)
58 pin_hash_filter [hash_idx] = ptr;
60 sgen_pointer_queue_add (&pin_queue, ptr);
64 sgen_find_optimized_pin_queue_area (void *start, void *end, size_t *first_out, size_t *last_out)
66 size_t first = sgen_pointer_queue_search (&pin_queue, start);
67 size_t last = sgen_pointer_queue_search (&pin_queue, end);
68 SGEN_ASSERT (0, last == pin_queue.next_slot || pin_queue.data [last] >= end, "Pin queue search gone awry");
75 sgen_pinning_get_entry (size_t index)
77 SGEN_ASSERT (0, index <= pin_queue.next_slot, "Pin queue entry out of range");
78 return &pin_queue.data [index];
82 sgen_find_section_pin_queue_start_end (GCMemSection *section)
84 SGEN_LOG (6, "Pinning from section %p (%p-%p)", section, section->data, section->end_data);
86 sgen_find_optimized_pin_queue_area (section->data, section->end_data,
87 §ion->pin_queue_first_entry, §ion->pin_queue_last_entry);
89 SGEN_LOG (6, "Found %zd pinning addresses in section %p",
90 section->pin_queue_last_entry - section->pin_queue_first_entry, section);
/* This will setup the given section for the whole pin queue. */
95 sgen_pinning_setup_section (GCMemSection *section)
97 section->pin_queue_first_entry = 0;
98 section->pin_queue_last_entry = pin_queue.next_slot;
102 sgen_pinning_trim_queue_to_section (GCMemSection *section)
104 SGEN_ASSERT (0, section->pin_queue_first_entry == 0, "Pin queue trimming assumes the whole pin queue is used by the nursery");
105 pin_queue.next_slot = section->pin_queue_last_entry;
109 * This is called when we've run out of memory during a major collection.
111 * After collecting potential pin entries and sorting the array, this is what it looks like:
113 * +--------------------+---------------------------------------------+--------------------+
114 * | major heap entries | nursery entries | major heap entries |
115 * +--------------------+---------------------------------------------+--------------------+
117 * Of course there might not be major heap entries before and/or after the nursery entries,
118 * depending on where the major heap sections are in the address space, and whether there
119 * were any potential pointers there.
121 * When we pin nursery objects, we compact the nursery part of the pin array, which leaves
122 * discarded entries after the ones that actually pointed to nursery objects:
124 * +--------------------+-----------------+---------------------------+--------------------+
125 * | major heap entries | nursery entries | discarded nursery entries | major heap entries |
126 * +--------------------+-----------------+---------------------------+--------------------+
 * When, due to being out of memory, we late pin more objects, the pin array looks like this:
131 * +--------------------+-----------------+---------------------------+--------------------+--------------+
132 * | major heap entries | nursery entries | discarded nursery entries | major heap entries | late entries |
133 * +--------------------+-----------------+---------------------------+--------------------+--------------+
135 * This function gets rid of the discarded nursery entries by nulling them out. Note that
136 * we can late pin objects not only in the nursery but also in the major heap, which happens
137 * when evacuation fails.
140 sgen_pin_queue_clear_discarded_entries (GCMemSection *section, size_t max_pin_slot)
142 void **start = sgen_pinning_get_entry (section->pin_queue_last_entry);
143 void **end = sgen_pinning_get_entry (max_pin_slot);
146 for (; start < end; ++start) {
148 if ((char*)addr < section->data || (char*)addr > section->end_data)
154 /* reduce the info in the pin queue, removing duplicate pointers and sorting them */
156 sgen_optimize_pin_queue (void)
158 sgen_pointer_queue_sort_uniq (&pin_queue);
162 sgen_get_pinned_count (void)
164 return pin_queue.next_slot;
168 sgen_dump_pin_queue (void)
172 for (i = 0; i < last_num_pinned; ++i) {
173 void *ptr = pin_queue.data [i];
174 SGEN_LOG (3, "Bastard pinning obj %p (%s), size: %zd", ptr, sgen_safe_name (ptr), sgen_safe_object_get_size (ptr));
178 typedef struct _CementHashEntry CementHashEntry;
179 struct _CementHashEntry {
184 static CementHashEntry cement_hash [SGEN_CEMENT_HASH_SIZE];
185 static CementHashEntry cement_hash_concurrent [SGEN_CEMENT_HASH_SIZE];
187 static gboolean cement_enabled = TRUE;
188 static gboolean cement_concurrent = FALSE;
191 sgen_cement_init (gboolean enabled)
193 cement_enabled = enabled;
197 sgen_cement_reset (void)
199 SGEN_ASSERT (1, !cement_concurrent, "Concurrent cementing cannot simply be reset");
201 memset (cement_hash, 0, sizeof (cement_hash));
202 binary_protocol_cement_reset ();
206 * The reason we cannot simply reset cementing at the start of a
207 * concurrent collection is that the nursery collections running
208 * concurrently must keep pinning the cemented objects, because we
209 * don't have the global remsets that point to them anymore - if the
210 * nursery collector moved the cemented objects, we'd have invalid
211 * pointers in the major heap.
213 * What we do instead is to reset cementing at the start of concurrent
214 * collections in such a way that nursery collections happening during
215 * the major collection still pin the formerly cemented objects. We
216 * have a shadow cementing table for that purpose. The nursery
217 * collections still work with the old cementing table, while the
218 * major collector builds up a new cementing table, adding global
219 * remsets whenever needed like usual. When the major collector
220 * finishes, the old cementing table is replaced by the new one.
224 sgen_cement_concurrent_start (void)
226 SGEN_ASSERT (1, !cement_concurrent, "Concurrent cementing has already been started");
227 cement_concurrent = TRUE;
229 memset (cement_hash_concurrent, 0, sizeof (cement_hash));
233 sgen_cement_concurrent_finish (void)
235 SGEN_ASSERT (1, cement_concurrent, "Concurrent cementing hasn't been started");
236 cement_concurrent = FALSE;
238 memcpy (cement_hash, cement_hash_concurrent, sizeof (cement_hash));
242 sgen_cement_lookup (char *obj)
244 guint hv = mono_aligned_addr_hash (obj);
245 int i = SGEN_CEMENT_HASH (hv);
247 SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Looking up cementing for non-nursery objects makes no sense");
252 if (!cement_hash [i].obj)
254 if (cement_hash [i].obj != obj)
257 return cement_hash [i].count >= SGEN_CEMENT_THRESHOLD;
261 sgen_cement_lookup_or_register (char *obj)
265 CementHashEntry *hash;
266 gboolean concurrent_cementing = sgen_concurrent_collection_in_progress ();
271 if (concurrent_cementing)
272 SGEN_ASSERT (5, cement_concurrent, "Cementing wasn't inited with concurrent flag");
274 if (concurrent_cementing)
275 hash = cement_hash_concurrent;
279 hv = mono_aligned_addr_hash (obj);
280 i = SGEN_CEMENT_HASH (hv);
282 SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Can only cement pointers to nursery objects");
285 SGEN_ASSERT (5, !hash [i].count, "Cementing hash inconsistent");
287 } else if (hash [i].obj != obj) {
291 if (hash [i].count >= SGEN_CEMENT_THRESHOLD)
295 if (hash [i].count == SGEN_CEMENT_THRESHOLD) {
296 SGEN_ASSERT (9, SGEN_OBJECT_IS_PINNED (obj), "Can only cement pinned objects");
297 SGEN_CEMENT_OBJECT (obj);
299 if (G_UNLIKELY (MONO_GC_OBJ_CEMENTED_ENABLED())) {
300 MonoVTable *vt G_GNUC_UNUSED = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
301 MONO_GC_OBJ_CEMENTED ((mword)obj, sgen_safe_object_get_size ((MonoObject*)obj),
302 vt->klass->name_space, vt->klass->name);
304 binary_protocol_cement (obj, (gpointer)SGEN_LOAD_VTABLE (obj),
305 (int)sgen_safe_object_get_size ((MonoObject*)obj));
312 pin_from_hash (CementHashEntry *hash, gboolean has_been_reset)
315 for (i = 0; i < SGEN_CEMENT_HASH_SIZE; ++i) {
320 SGEN_ASSERT (5, hash [i].count >= SGEN_CEMENT_THRESHOLD, "Cementing hash inconsistent");
322 sgen_pin_stage_ptr (hash [i].obj);
323 /* FIXME: do pin stats if enabled */
325 SGEN_CEMENT_OBJECT (hash [i].obj);
330 sgen_pin_cemented_objects (void)
332 pin_from_hash (cement_hash, TRUE);
333 if (cement_concurrent)
334 pin_from_hash (cement_hash_concurrent, FALSE);
338 sgen_cement_clear_below_threshold (void)
341 for (i = 0; i < SGEN_CEMENT_HASH_SIZE; ++i) {
342 if (cement_hash [i].count < SGEN_CEMENT_THRESHOLD) {
343 cement_hash [i].obj = NULL;
344 cement_hash [i].count = 0;
349 #endif /* HAVE_SGEN_GC */