/* Queue of pin candidates gathered during the current pinning phase.
 * NOTE(review): exact semantics inferred from usage below — confirm. */
static SgenPointerQueue pin_queue;
/* Entry count of pin_queue at the end of the previous pinning pass
 * (snapshotted from pin_queue.next_slot). */
static size_t last_num_pinned = 0;
+/*
+ * While we hold the pin_queue_mutex, all objects in pin_queue_objs will
+ * stay pinned, which means they can't move, therefore they can be scanned.
+ */
+static SgenPointerQueue pin_queue_objs;
+static MonoCoopMutex pin_queue_mutex;
/* Hash filter cleared at the start of each pinning phase; presumably used
 * to cheaply reject duplicate pin candidates — TODO confirm at use sites. */
#define PIN_HASH_SIZE 1024
static void *pin_hash_filter [PIN_HASH_SIZE];
+void
+sgen_pinning_init (void)
+{
+ mono_coop_mutex_init (&pin_queue_mutex);
+}
+
/*
 * sgen_init_pinning:
 *
 * Start a pinning phase.  Takes pin_queue_mutex (held until the matching
 * finish/unlock), resets the duplicate-filter hash and empties the queue
 * of scannable pinned objects.
 */
void
sgen_init_pinning (void)
{
	mono_coop_mutex_lock (&pin_queue_mutex);
	pin_queue.mem_type = INTERNAL_MEM_PIN_QUEUE;
	memset (pin_hash_filter, 0, sizeof (pin_hash_filter));
	sgen_pointer_queue_clear (&pin_queue_objs);
}
/* NOTE(review): the identifier line of this definition is missing — only
 * `void` survives, so this cannot compile as-is.  From the body (snapshot
 * next_slot, clear pin_queue, release pin_queue_mutex taken in
 * sgen_init_pinning) this is presumably the phase-finishing routine;
 * restore the name before building. */
void
{
	last_num_pinned = pin_queue.next_slot;
	sgen_pointer_queue_clear (&pin_queue);
+	mono_coop_mutex_unlock (&pin_queue_mutex);
+}
+
+void
+sgen_pinning_register_pinned_in_nursery (GCObject *obj)
+{
+ sgen_pointer_queue_add (&pin_queue_objs, obj);
+}
+
+void
+sgen_scan_pin_queue_objects (ScanCopyContext ctx)
+{
+ int i;
+ ScanObjectFunc scan_func = ctx.ops->scan_object;
+
+ mono_coop_mutex_lock (&pin_queue_mutex);
+ for (i = 0; i < pin_queue_objs.next_slot; ++i) {
+ GCObject *obj = (GCObject *)pin_queue_objs.data [i];
+ scan_func (obj, sgen_obj_get_descriptor_safe (obj), ctx.queue);
+ }
+ mono_coop_mutex_unlock (&pin_queue_mutex);
}
/* NOTE(review): garbled definition — the identifier line, `{` and variable
 * declarations appear to have been mangled (a bare `void` is followed
 * directly by statements, and a leftover -/+ diff pair sits in the loop
 * body).  From the body this looks like a debug routine that logs every
 * pinned object (address, class name, size) at log level 3; reconstruct
 * the header before building. */
void
int i;
for (i = 0; i < last_num_pinned; ++i) {
-	void *ptr = pin_queue.data [i];
+	GCObject *ptr = (GCObject *)pin_queue.data [i];
	SGEN_LOG (3, "Bastard pinning obj %p (%s), size: %zd", ptr, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (ptr)), sgen_safe_object_get_size (ptr));
}
}
/* One slot of the cementing hash: tracks how often a nursery object has
 * been seen so that frequently-encountered objects can be cemented. */
struct _CementHashEntry {
	GCObject *obj; /* the tracked object; NULL means the slot is free */
	unsigned int count; /* occurrences; compared against SGEN_CEMENT_THRESHOLD */
+	gboolean forced; /* if it should stay cemented after the finishing pause */
};
static CementHashEntry cement_hash [SGEN_CEMENT_HASH_SIZE];
void
sgen_cement_reset (void)
{
- memset (cement_hash, 0, sizeof (cement_hash));
+ int i;
+ for (i = 0; i < SGEN_CEMENT_HASH_SIZE; i++) {
+ if (cement_hash [i].forced) {
+ cement_hash [i].forced = FALSE;
+ } else {
+ cement_hash [i].obj = NULL;
+ cement_hash [i].count = 0;
+ }
+ }
binary_protocol_cement_reset ();
}
+
+/*
+ * The pin_queue should be full and sorted, without entries from the cemented
+ * objects. We traverse the cement hash and check if each object is pinned in
+ * the pin_queue (the pin_queue contains entries between obj and obj+obj_len)
+ */
+void
+sgen_cement_force_pinned (void)
+{
+ int i;
+
+ if (!cement_enabled)
+ return;
+
+ for (i = 0; i < SGEN_CEMENT_HASH_SIZE; i++) {
+ GCObject *obj = cement_hash [i].obj;
+ size_t index;
+ if (!obj)
+ continue;
+ if (cement_hash [i].count < SGEN_CEMENT_THRESHOLD)
+ continue;
+ SGEN_ASSERT (0, !cement_hash [i].forced, "Why do we have a forced cemented object before forcing ?");
+
+ /* Returns the index of the target or of the first element greater than it */
+ index = sgen_pointer_queue_search (&pin_queue, obj);
+ if (index == pin_queue.next_slot)
+ continue;
+ SGEN_ASSERT (0, pin_queue.data [index] >= (gpointer)obj, "Binary search should return a pointer greater than the search target");
+ if (pin_queue.data [index] < (gpointer)((char*)obj + sgen_safe_object_get_size (obj)))
+ cement_hash [i].forced = TRUE;
+ }
+}
+
+gboolean
+sgen_cement_is_forced (GCObject *obj)
+{
+ guint hv = sgen_aligned_addr_hash (obj);
+ int i = SGEN_CEMENT_HASH (hv);
+
+ SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Looking up cementing for non-nursery objects makes no sense");
+
+ if (!cement_enabled)
+ return FALSE;
+
+ if (!cement_hash [i].obj)
+ return FALSE;
+ if (cement_hash [i].obj != obj)
+ return FALSE;
+
+ return cement_hash [i].forced;
+}
+
gboolean
sgen_cement_lookup (GCObject *obj)
{