/*
 * sgen-fin-weak-hash.c: Finalizers and weak links.
 *
 * Author:
 *      Paolo Molaro (lupus@ximian.com)
 *      Rodrigo Kumpera (kumpera@gmail.com)
 *
 * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
 * Copyright 2011 Xamarin, Inc.
 * Copyright (C) 2012 Xamarin Inc
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License 2.0 as published by the Free Software Foundation;
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License 2.0 along with this library; if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "config.h"
#ifdef HAVE_SGEN_GC

#include "mono/sgen/sgen-gc.h"
#include "mono/sgen/sgen-gray.h"
#include "mono/sgen/sgen-protocol.h"
#include "mono/sgen/sgen-pointer-queue.h"
#include "mono/sgen/sgen-client.h"
#include "mono/utils/mono-membar.h"

#define ptr_in_nursery sgen_ptr_in_nursery

typedef SgenGrayQueue GrayQueue;

static int no_finalize = 0;

#define DISLINK_OBJECT(l)       (REVEAL_POINTER (*(void**)(l)))
#define DISLINK_TRACK(l)        ((~(size_t)(*(void**)(l))) & 1)

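/*
 * A weak link slot does not store the object pointer directly: as
 * DISLINK_TRACK above implies, the slot holds the bitwise complement of
 * the object pointer, with the tracking flag in the lowest bit.  The
 * plain pointer therefore never appears in the slot, so conservative
 * scanning cannot accidentally keep the referent alive.  DISLINK_OBJECT
 * reveals the object, DISLINK_TRACK extracts the flag.
 */
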
/*
 * The finalizable hash has the object as the key; the
 * disappearing_link hash has the link address as the key.
 */

#define TAG_MASK ((mword)0x1)

static inline GCObject*
tagged_object_get_object (GCObject *object)
{
        return (GCObject*)(((mword)object) & ~TAG_MASK);
}

static inline int
tagged_object_get_tag (GCObject *object)
{
        return ((mword)object) & TAG_MASK;
}

static inline GCObject*
tagged_object_apply (void *object, int tag_bits)
{
        return (GCObject*)((mword)object | (mword)tag_bits);
}

static int
tagged_object_hash (GCObject *o)
{
        return sgen_aligned_addr_hash (tagged_object_get_object (o));
}

static gboolean
tagged_object_equals (GCObject *a, GCObject *b)
{
        return tagged_object_get_object (a) == tagged_object_get_object (b);
}

static SgenHashTable minor_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals);
static SgenHashTable major_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals);

static SgenHashTable*
get_finalize_entry_hash_table (int generation)
{
        switch (generation) {
        case GENERATION_NURSERY: return &minor_finalizable_hash;
        case GENERATION_OLD: return &major_finalizable_hash;
        default: g_assert_not_reached ();
        }
}

#define BRIDGE_OBJECT_MARKED 0x1
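
/*
 * Since objects are aligned, the low bit of a finalizable-hash key is free
 * to carry state.  The bridge code tags a key like this:
 *
 *     sgen_hash_table_set_key (hash_table, obj, tagged_object_apply (obj, BRIDGE_OBJECT_MARKED));
 *
 * and the iteration code strips the tag again with tagged_object_get_object ()
 * and tagged_object_get_tag ().  The hash and equality functions above ignore
 * the tag bit, so tagging a key doesn't perturb the table.
 */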

/* LOCKING: requires that the GC lock is held */
void
sgen_mark_bridge_object (GCObject *obj)
{
        SgenHashTable *hash_table = get_finalize_entry_hash_table (ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD);

        sgen_hash_table_set_key (hash_table, obj, tagged_object_apply (obj, BRIDGE_OBJECT_MARKED));
}

/* LOCKING: requires that the GC lock is held */
void
sgen_collect_bridge_objects (int generation, ScanCopyContext ctx)
{
        CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
        GrayQueue *queue = ctx.queue;
        SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
        GCObject *object;
        gpointer dummy G_GNUC_UNUSED;
        char *copy;
        SgenPointerQueue moved_fin_objects;

        sgen_pointer_queue_init (&moved_fin_objects, INTERNAL_MEM_TEMPORARY);

        if (no_finalize)
                return;

        SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
                int tag = tagged_object_get_tag (object);
                object = tagged_object_get_object (object);

                /* Bridge code told us to ignore this one */
                if (tag == BRIDGE_OBJECT_MARKED)
                        continue;

                /* The major heap says the object is still live, so skip it */
                if (major_collector.is_object_live ((char*)object))
                        continue;

                /* The object is not ready for finalization yet, i.e. it's still live, so skip it */
                if (!sgen_gc_is_object_ready_for_finalization (object))
                        continue;

                if (!sgen_client_bridge_is_bridge_object (object))
                        continue;

                copy = (char*)object;
                copy_func ((void**)&copy, queue);

                sgen_client_bridge_register_finalized_object ((GCObject*)copy);

                if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
                        /* remove from the list */
                        SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

                        /* insert it into the major hash */
                        sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);

                        SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);

                        continue;
                } else if (copy != (char*)object) {
                        /* update pointer */
                        SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

                        /* register for reinsertion */
                        sgen_pointer_queue_add (&moved_fin_objects, tagged_object_apply (copy, tag));

                        SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);

                        continue;
                }
        } SGEN_HASH_TABLE_FOREACH_END;

        while (!sgen_pointer_queue_is_empty (&moved_fin_objects)) {
                sgen_hash_table_replace (hash_table, sgen_pointer_queue_pop (&moved_fin_objects), NULL, NULL);
        }

        sgen_pointer_queue_free (&moved_fin_objects);
}


/* LOCKING: requires that the GC lock is held */
void
sgen_finalize_in_range (int generation, ScanCopyContext ctx)
{
        CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
        GrayQueue *queue = ctx.queue;
        SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
        GCObject *object;
        gpointer dummy G_GNUC_UNUSED;
        SgenPointerQueue moved_fin_objects;

        sgen_pointer_queue_init (&moved_fin_objects, INTERNAL_MEM_TEMPORARY);

        if (no_finalize)
                return;
        SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
                int tag = tagged_object_get_tag (object);
                object = tagged_object_get_object (object);
                if (!major_collector.is_object_live ((char*)object)) {
                        gboolean is_fin_ready = sgen_gc_is_object_ready_for_finalization (object);
                        GCObject *copy = object;
                        copy_func ((void**)&copy, queue);
                        if (is_fin_ready) {
                                /* remove and put in fin_ready_list */
                                SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
                                sgen_queue_finalization_entry (copy);
                                /* The copy above made the object survive this collection so its finalizer can run */
                                SGEN_LOG (5, "Queueing object for finalization: %p (%s) (was at %p) (%d)", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object, sgen_hash_table_num_entries (hash_table));
                                continue;
                        } else {
                                if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
                                        /* remove from the list */
                                        SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

                                        /* insert it into the major hash */
                                        sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);

                                        SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);

                                        continue;
                                } else if (copy != object) {
                                        /* update pointer */
                                        SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

                                        /* register for reinsertion */
                                        sgen_pointer_queue_add (&moved_fin_objects, tagged_object_apply (copy, tag));

                                        SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (copy)), object);

                                        continue;
                                }
                        }
                }
        } SGEN_HASH_TABLE_FOREACH_END;

        while (!sgen_pointer_queue_is_empty (&moved_fin_objects)) {
                sgen_hash_table_replace (hash_table, sgen_pointer_queue_pop (&moved_fin_objects), NULL, NULL);
        }

        sgen_pointer_queue_free (&moved_fin_objects);
}

/* LOCKING: requires that the GC lock is held */
static void
register_for_finalization (GCObject *obj, void *user_data, int generation)
{
        SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);

        if (no_finalize)
                return;

        if (user_data) {
                if (sgen_hash_table_replace (hash_table, obj, NULL, NULL)) {
                        GCVTable *vt = SGEN_LOAD_VTABLE_UNCHECKED (obj);
                        SGEN_LOG (5, "Added finalizer for object: %p (%s) (%d) to %s table", obj, sgen_client_vtable_get_name (vt), hash_table->num_entries, sgen_generation_name (generation));
                }
        } else {
                if (sgen_hash_table_remove (hash_table, obj, NULL)) {
                        GCVTable *vt = SGEN_LOAD_VTABLE_UNCHECKED (obj);
                        SGEN_LOG (5, "Removed finalizer for object: %p (%s) (%d)", obj, sgen_client_vtable_get_name (vt), hash_table->num_entries);
                }
        }
}
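
/*
 * Note that `user_data` doubles as the registration switch: a non-NULL value
 * registers the object for finalization, NULL cancels a previous
 * registration.  Illustrative use via the public wrapper defined below:
 *
 *     sgen_object_register_for_finalization (obj, data);   // register
 *     sgen_object_register_for_finalization (obj, NULL);   // deregister
 */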

/*
 * We're using (mostly) non-locking staging queues for finalizers and weak links to speed
 * up registering them.  Otherwise we'd have to take the GC lock.
 *
 * The queues are arrays of `StageEntry`, plus a `next_entry` index.  Threads add entries to
 * the queue via `add_stage_entry()` in a linear fashion until it fills up, in which case
 * `process_stage_entries()` is called to drain it.  A garbage collection will also drain
 * the queues via the same function.  That implies that `add_stage_entry()`, since it
 * doesn't take a lock, must be able to run concurrently with `process_stage_entries()`,
 * though it doesn't have to make progress while the queue is drained.  In fact, once it
 * detects that the queue is being drained, it blocks until the draining is done.
 *
 * The protocol must guarantee that entries in the queue are causally ordered, otherwise two
 * entries for the same location might get switched, resulting in the earlier one being
 * committed and the later one ignored.
 *
 * `next_entry` is the index of the next entry to be filled, or `-1` if the queue is
 * currently being drained.  Each entry has a state:
 *
 * `STAGE_ENTRY_FREE`: The entry is free.  Its data fields must be `NULL`.
 *
 * `STAGE_ENTRY_BUSY`: The entry is currently being filled in.
 *
 * `STAGE_ENTRY_USED`: The entry is completely filled in and must be processed in the next
 * draining round.
 *
 * `STAGE_ENTRY_INVALID`: The entry was busy during queue draining and therefore
 * invalidated.  Entries that are `BUSY` can obviously not be processed during a drain, but
 * we can't leave them in place because new entries might be inserted before them, including
 * from the same thread, violating causality.  An alternative would be not to reset
 * `next_entry` to `0` after a drain, but to the index of the last `BUSY` entry plus one,
 * but that can potentially waste the whole queue.
 *
 * State transitions:
 *
 * | from    | to      | filler? | drainer? |
 * +---------+---------+---------+----------+
 * | FREE    | BUSY    | X       |          |
 * | BUSY    | FREE    | X       |          |
 * | BUSY    | USED    | X       |          |
 * | BUSY    | INVALID |         | X        |
 * | USED    | FREE    |         | X        |
 * | INVALID | FREE    | X       |          |
 *
 * `next_entry` can be incremented either by the filler thread that set the corresponding
 * entry to `BUSY`, or by another filler thread that's trying to get a `FREE` slot.  If that
 * other thread wasn't allowed to increment, it would block on the first filler thread.
 *
 * An entry's state, once it's set from `FREE` to `BUSY` by a filler thread, can only be
 * changed by that same thread or by the drainer.  The drainer can only set a `BUSY` entry
 * to `INVALID`, so it needs to be set to `FREE` again by the original filler thread.
 */
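
/*
 * An illustrative interleaving of two fillers and the drainer:
 *
 *   filler A: CASes entries [3] FREE -> BUSY
 *   filler B: sees entries [3] != FREE, CASes next_entry 3 -> 4,
 *             starts over and claims entries [4]
 *   drainer:  sets next_entry = -1, walks the queue, CASes
 *             entries [3] BUSY -> INVALID, processes entries [4]
 *   filler A: fills in its data, fails the CAS entries [3] BUSY -> USED
 *             because the state is INVALID, resets entries [3] to FREE
 *             and retries from the start
 */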

#define STAGE_ENTRY_FREE        0
#define STAGE_ENTRY_BUSY        1
#define STAGE_ENTRY_USED        2
#define STAGE_ENTRY_INVALID     3

typedef struct {
        volatile gint32 state;
        GCObject *obj;
        void *user_data;
} StageEntry;

#define NUM_FIN_STAGE_ENTRIES   1024

static volatile gint32 next_fin_stage_entry = 0;
static StageEntry fin_stage_entries [NUM_FIN_STAGE_ENTRIES];

/*
 * This is used to lock the stage when processing is forced, i.e. when it's triggered by a
 * garbage collection.  In that case, the world is already stopped and there's only one
 * thread operating on the queue.
 */
static void
lock_stage_for_processing (volatile gint32 *next_entry)
{
        *next_entry = -1;
}

/*
 * When processing is triggered by an overflow, we don't want to take the GC lock
 * immediately, and then set `next_entry` to `-1`, because another thread might have drained
 * the queue in the meantime.  Instead, we make sure the overflow is still there, we
 * atomically set `next_entry`, and only once that happened do we take the GC lock.
 */
static gboolean
try_lock_stage_for_processing (int num_entries, volatile gint32 *next_entry)
{
        gint32 old = *next_entry;
        if (old < num_entries)
                return FALSE;
        return InterlockedCompareExchange (next_entry, -1, old) == old;
}
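
/*
 * The caller-side pattern for handling an overflow, used below by
 * sgen_object_register_for_finalization () and
 * sgen_register_disappearing_link (), is:
 *
 *     while (add_stage_entry (num, &next, entries, obj, data) == -1) {
 *             if (try_lock_stage_for_processing (num, &next)) {
 *                     LOCK_GC;
 *                     process_stage_entries (num, &next, entries, process_func);
 *                     UNLOCK_GC;
 *             }
 *     }
 */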

/* LOCKING: requires that the GC lock is held */
static void
process_stage_entries (int num_entries, volatile gint32 *next_entry, StageEntry *entries, void (*process_func) (GCObject*, void*, int))
{
        int i;

        /*
         * This can happen if, after setting `next_entry` to `-1` in
         * `try_lock_stage_for_processing()`, a GC was triggered, which then drained the
         * queue and reset `next_entry`.
         *
         * We have the GC lock now, so if it's still `-1`, we can't be interrupted by a GC.
         */
        if (*next_entry != -1)
                return;

        for (i = 0; i < num_entries; ++i) {
                gint32 state;

        retry:
                state = entries [i].state;

                switch (state) {
                case STAGE_ENTRY_FREE:
                case STAGE_ENTRY_INVALID:
                        continue;
                case STAGE_ENTRY_BUSY:
                        /* BUSY -> INVALID */
                        /*
                         * This must be done atomically, because the filler thread can set
                         * the entry to `USED`, in which case we must process it, so we must
                         * detect that eventuality.
                         */
                        if (InterlockedCompareExchange (&entries [i].state, STAGE_ENTRY_INVALID, STAGE_ENTRY_BUSY) != STAGE_ENTRY_BUSY)
                                goto retry;
                        continue;
                case STAGE_ENTRY_USED:
                        break;
                default:
                        SGEN_ASSERT (0, FALSE, "Invalid stage entry state");
                        break;
                }

                /* state is USED */

                process_func (entries [i].obj, entries [i].user_data, i);

                entries [i].obj = NULL;
                entries [i].user_data = NULL;

                mono_memory_write_barrier ();

                /* USED -> FREE */
                /*
                 * This transition only happens here, so we don't have to do it atomically.
                 */
                entries [i].state = STAGE_ENTRY_FREE;
        }

        mono_memory_write_barrier ();

        *next_entry = 0;
}

#ifdef HEAVY_STATISTICS
static guint64 stat_overflow_abort = 0;
static guint64 stat_wait_for_processing = 0;
static guint64 stat_increment_other_thread = 0;
static guint64 stat_index_decremented = 0;
static guint64 stat_entry_invalidated = 0;
static guint64 stat_success = 0;
#endif

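/*
 * Stage `obj`/`user_data` in `entries`.  Returns the index of the filled slot
 * on success, or -1 if the queue has overflowed, in which case the caller is
 * expected to (try to) drain the queue and retry.  Safe to call concurrently
 * with other fillers and with the drainer; see the protocol description
 * above.
 */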
static int
add_stage_entry (int num_entries, volatile gint32 *next_entry, StageEntry *entries, GCObject *obj, void *user_data)
{
        gint32 index, new_next_entry, old_next_entry;
        gint32 previous_state;

 retry:
        for (;;) {
                index = *next_entry;
                if (index >= num_entries) {
                        HEAVY_STAT (++stat_overflow_abort);
                        return -1;
                }
                if (index < 0) {
                        /*
                         * Backed-off waiting is way more efficient than even using a
                         * dedicated lock for this.
                         */
                        while ((index = *next_entry) < 0) {
                                /*
                                 * This seems like a good value.  Determined by timing
                                 * sgen-weakref-stress.exe.
                                 */
                                g_usleep (200);
                                HEAVY_STAT (++stat_wait_for_processing);
                        }
                        continue;
                }
                /* FREE -> BUSY */
                if (entries [index].state != STAGE_ENTRY_FREE ||
                                InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_BUSY, STAGE_ENTRY_FREE) != STAGE_ENTRY_FREE) {
                        /*
                         * If we can't get the entry it must be because another thread got
                         * it first.  We don't want to wait for that thread to increment
                         * `next_entry`, so we try to do it ourselves.  Whether we succeed
                         * or not, we start over.
                         */
                        if (*next_entry == index) {
                                InterlockedCompareExchange (next_entry, index + 1, index);
                                //g_print ("tried increment for other thread\n");
                                HEAVY_STAT (++stat_increment_other_thread);
                        }
                        continue;
                }
                /* state is BUSY now */
                mono_memory_write_barrier ();
                /*
                 * Incrementing `next_entry` must happen after setting the state to `BUSY`.
                 * If it were the other way around, it would be possible that after a filler
                 * incremented the index, other threads fill up the queue, the queue is
                 * drained, the original filler finally fills in the slot, but `next_entry`
                 * ends up at the start of the queue, and new entries are written in the
                 * queue in front of, not behind, the original filler's entry.
                 *
                 * We don't actually require that the CAS succeeds, but we do require that
                 * the value of `next_entry` is not lower than our index.  Since the drainer
                 * sets it to `-1`, that also takes care of the case that the drainer is
                 * currently running.
                 */
                old_next_entry = InterlockedCompareExchange (next_entry, index + 1, index);
                if (old_next_entry < index) {
                        /* BUSY -> FREE */
                        /* INVALID -> FREE */
                        /*
                         * The state might still be `BUSY`, or the drainer could have set it
                         * to `INVALID`.  In either case, there's no point in CASing.  Set
                         * it to `FREE` and start over.
                         */
                        entries [index].state = STAGE_ENTRY_FREE;
                        HEAVY_STAT (++stat_index_decremented);
                        continue;
                }
                break;
        }

        SGEN_ASSERT (0, index >= 0 && index < num_entries, "Invalid index");

        entries [index].obj = obj;
        entries [index].user_data = user_data;

        mono_memory_write_barrier ();

        new_next_entry = *next_entry;
        mono_memory_read_barrier ();
        /* BUSY -> USED */
        /*
         * A `BUSY` entry will either still be `BUSY` or the drainer will have set it to
         * `INVALID`.  In the former case, we set it to `USED` and we're finished.  In the
         * latter case, we reset it to `FREE` and start over.
         */
        previous_state = InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_USED, STAGE_ENTRY_BUSY);
        if (previous_state == STAGE_ENTRY_BUSY) {
                SGEN_ASSERT (0, new_next_entry >= index || new_next_entry < 0, "Invalid next entry index - as long as we're busy, other thread can only increment or invalidate it");
                HEAVY_STAT (++stat_success);
                return index;
        }

        SGEN_ASSERT (0, previous_state == STAGE_ENTRY_INVALID, "Invalid state transition - other thread can only make busy state invalid");
        entries [index].obj = NULL;
        entries [index].user_data = NULL;
        mono_memory_write_barrier ();
        /* INVALID -> FREE */
        entries [index].state = STAGE_ENTRY_FREE;

        HEAVY_STAT (++stat_entry_invalidated);

        goto retry;
}

/* LOCKING: requires that the GC lock is held */
static void
process_fin_stage_entry (GCObject *obj, void *user_data, int index)
{
        if (ptr_in_nursery (obj))
                register_for_finalization (obj, user_data, GENERATION_NURSERY);
        else
                register_for_finalization (obj, user_data, GENERATION_OLD);
}

/* LOCKING: requires that the GC lock is held */
void
sgen_process_fin_stage_entries (void)
{
        lock_stage_for_processing (&next_fin_stage_entry);
        process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
}

void
sgen_object_register_for_finalization (GCObject *obj, void *user_data)
{
        while (add_stage_entry (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, obj, user_data) == -1) {
                if (try_lock_stage_for_processing (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry)) {
                        LOCK_GC;
                        process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
                        UNLOCK_GC;
                }
        }
}

/* LOCKING: requires that the GC lock is held */
static int
finalizers_with_predicate (SgenObjectPredicateFunc predicate, void *user_data, GCObject **out_array, int out_size, SgenHashTable *hash_table)
{
        GCObject *object;
        gpointer dummy G_GNUC_UNUSED;
        int count;

        if (no_finalize || !out_size || !out_array)
                return 0;
        count = 0;
        SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
                object = tagged_object_get_object (object);

                if (predicate (object, user_data)) {
                        /* remove and put in out_array */
                        SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
                        out_array [count ++] = object;
                        SGEN_LOG (5, "Collecting object for finalization: %p (%s) (%d)", object, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (object)), sgen_hash_table_num_entries (hash_table));
                        if (count == out_size)
                                return count;
                        continue;
                }
        } SGEN_HASH_TABLE_FOREACH_END;
        return count;
}

/**
 * sgen_gather_finalizers_if:
 * @predicate: predicate function
 * @user_data: predicate function data argument
 * @out_array: output array
 * @out_size: size of output array
 *
 * Store inside @out_array up to @out_size objects that match @predicate. Returns the number
 * of stored items. Can be called repeatedly until it returns 0.
 *
 * The items are removed from the finalizer data structure, so the caller is supposed
 * to finalize them.
 *
 * @out_array must be on the stack, or registered as a root, to allow the GC to know the
 * objects are still alive.
 */
int
sgen_gather_finalizers_if (SgenObjectPredicateFunc predicate, void *user_data, GCObject **out_array, int out_size)
{
        int result;

        LOCK_GC;
        sgen_process_fin_stage_entries ();
        result = finalizers_with_predicate (predicate, user_data, (GCObject**)out_array, out_size, &minor_finalizable_hash);
        if (result < out_size) {
                result += finalizers_with_predicate (predicate, user_data, (GCObject**)out_array + result, out_size - result,
                        &major_finalizable_hash);
        }
        UNLOCK_GC;

        return result;
}
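
/*
 * A sketch of the intended calling pattern; `matches_domain` and
 * `run_finalizers` are hypothetical caller-side helpers:
 *
 *     GCObject *objs [64];
 *     int n;
 *     while ((n = sgen_gather_finalizers_if (matches_domain, domain, objs, 64)))
 *             run_finalizers (objs, n);
 */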

static SgenHashTable minor_disappearing_link_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_DISLINK_TABLE, INTERNAL_MEM_DISLINK, 0, sgen_aligned_addr_hash, NULL);
static SgenHashTable major_disappearing_link_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_DISLINK_TABLE, INTERNAL_MEM_DISLINK, 0, sgen_aligned_addr_hash, NULL);

static SgenHashTable*
get_dislink_hash_table (int generation)
{
        switch (generation) {
        case GENERATION_NURSERY: return &minor_disappearing_link_hash;
        case GENERATION_OLD: return &major_disappearing_link_hash;
        default: g_assert_not_reached ();
        }
}

/* LOCKING: assumes the GC lock is held */
static void
add_or_remove_disappearing_link (GCObject *obj, void **link, int generation)
{
        SgenHashTable *hash_table = get_dislink_hash_table (generation);

        if (!obj) {
                if (sgen_hash_table_remove (hash_table, link, NULL)) {
                        SGEN_LOG (5, "Removed dislink %p (%d) from %s table",
                                        link, hash_table->num_entries, sgen_generation_name (generation));
                }
                return;
        }

        sgen_hash_table_replace (hash_table, link, NULL, NULL);
        SGEN_LOG (5, "Added dislink for object: %p (%s) at %p to %s table",
                        obj, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE_UNCHECKED (obj)), link, sgen_generation_name (generation));
}

/* LOCKING: requires that the GC lock is held */
void
sgen_null_link_in_range (int generation, gboolean before_finalization, ScanCopyContext ctx)
{
        CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
        GrayQueue *queue = ctx.queue;
        void **link;
        gpointer dummy G_GNUC_UNUSED;
        SgenHashTable *hash = get_dislink_hash_table (generation);

        SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
                char *object;
                gboolean track;

                /*
                 * We null a weak link before unregistering it, so it's possible that a thread is
                 * suspended right in between setting the content to null and staging the unregister.
                 *
                 * The rest of this code cannot handle null links as DISLINK_OBJECT (NULL) produces an invalid address.
                 *
                 * We should simply skip the entry as the staged removal will take place during the next GC.
                 */
                if (!*link) {
                        SGEN_LOG (5, "Dislink %p was externally nullified", link);
                        continue;
                }

                track = DISLINK_TRACK (link);
                /*
                 * Tracked references are processed after
                 * finalization handling whereas standard weak
                 * references are processed before.  If an
                 * object is still not marked after finalization
                 * handling it means that it either doesn't have
                 * a finalizer or the finalizer has already run,
                 * so we must null a tracking reference.
                 */
                if (track != before_finalization) {
                        object = DISLINK_OBJECT (link);
                        /*
                         * We should guard against a null object being hidden here; this can sometimes happen.
                         */
                        if (!object) {
                                SGEN_LOG (5, "Dislink %p with a hidden null object", link);
                                continue;
                        }

                        if (!major_collector.is_object_live (object)) {
                                if (sgen_gc_is_object_ready_for_finalization (object)) {
                                        *link = NULL;
                                        binary_protocol_dislink_update (link, NULL, 0, 0);
                                        SGEN_LOG (5, "Dislink nullified at %p to GCed object %p", link, object);
                                        SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
                                        continue;
                                } else {
                                        char *copy = object;
                                        copy_func ((void**)&copy, queue);

                                        /* Update pointer if it's moved.  If the object
                                         * has been moved out of the nursery, we need to
                                         * remove the link from the minor hash table and
                                         * add it to the major one.
                                         *
                                         * FIXME: what if an object is moved earlier?
                                         */

                                        if (hash == &minor_disappearing_link_hash && !ptr_in_nursery (copy)) {
                                                SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

                                                g_assert (copy);
                                                *link = HIDE_POINTER (copy, track);
                                                add_or_remove_disappearing_link ((GCObject*)copy, link, GENERATION_OLD);
                                                binary_protocol_dislink_update (link, copy, track, 0);

                                                SGEN_LOG (5, "Upgraded dislink at %p to major because object %p moved to %p", link, object, copy);

                                                continue;
                                        } else {
                                                *link = HIDE_POINTER (copy, track);
                                                binary_protocol_dislink_update (link, copy, track, 0);
                                                SGEN_LOG (5, "Updated dislink at %p to %p", link, DISLINK_OBJECT (link));
                                        }
                                }
                        }
                }
        } SGEN_HASH_TABLE_FOREACH_END;
}

/* LOCKING: requires that the GC lock is held */
void
sgen_null_links_if (SgenObjectPredicateFunc predicate, void *data, int generation)
{
        void **link;
        gpointer dummy G_GNUC_UNUSED;
        SgenHashTable *hash = get_dislink_hash_table (generation);
        SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
                char *object = DISLINK_OBJECT (link);

                if (!*link)
                        continue;

                if (predicate ((GCObject*)object, data)) {
                        *link = NULL;
                        binary_protocol_dislink_update (link, NULL, 0, 0);
                        SGEN_LOG (5, "Dislink nullified by predicate at %p to GCed object %p", link, object);
                        SGEN_HASH_TABLE_FOREACH_REMOVE (FALSE /* TRUE */);
                        continue;
                }
        } SGEN_HASH_TABLE_FOREACH_END;
}

void
sgen_remove_finalizers_if (SgenObjectPredicateFunc predicate, void *user_data, int generation)
{
        SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
        GCObject *object;
        gpointer dummy G_GNUC_UNUSED;

        SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
                object = tagged_object_get_object (object);

                if (predicate (object, user_data)) {
                        SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
                        continue;
                }
        } SGEN_HASH_TABLE_FOREACH_END;
}

/* LOCKING: requires that the GC lock is held */
static void
process_dislink_stage_entry (GCObject *obj, void *_link, int index)
{
        void **link = _link;

        if (index >= 0)
                binary_protocol_dislink_process_staged (link, obj, index);

        add_or_remove_disappearing_link (NULL, link, GENERATION_NURSERY);
        add_or_remove_disappearing_link (NULL, link, GENERATION_OLD);
        if (obj) {
                if (ptr_in_nursery (obj))
                        add_or_remove_disappearing_link (obj, link, GENERATION_NURSERY);
                else
                        add_or_remove_disappearing_link (obj, link, GENERATION_OLD);
        }
}
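
/*
 * The link is first removed from both generations' tables and then, if it has
 * a new target, re-added to exactly one of them, so a link is registered in
 * at most one table: the one matching the generation of its current target.
 */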

#define NUM_DISLINK_STAGE_ENTRIES       1024

static volatile gint32 next_dislink_stage_entry = 0;
static StageEntry dislink_stage_entries [NUM_DISLINK_STAGE_ENTRIES];

/* LOCKING: requires that the GC lock is held */
void
sgen_process_dislink_stage_entries (void)
{
        lock_stage_for_processing (&next_dislink_stage_entry);
        process_stage_entries (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, process_dislink_stage_entry);
}

void
sgen_register_disappearing_link (GCObject *obj, void **link, gboolean track, gboolean in_gc)
{
        if (obj)
                *link = HIDE_POINTER (obj, track);
        else
                *link = NULL;

#if 1
        if (in_gc) {
                binary_protocol_dislink_update (link, obj, track, 0);
                process_dislink_stage_entry (obj, link, -1);
        } else {
                int index;
                binary_protocol_dislink_update (link, obj, track, 1);
                while ((index = add_stage_entry (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, obj, link)) == -1) {
                        if (try_lock_stage_for_processing (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry)) {
                                LOCK_GC;
                                process_stage_entries (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, process_dislink_stage_entry);
                                UNLOCK_GC;
                        }
                }
                binary_protocol_dislink_update_staged (link, obj, track, index);
        }
#else
        if (!in_gc)
                LOCK_GC;
        binary_protocol_dislink_update (link, obj, track, 0);
        process_dislink_stage_entry (obj, link, -1);
        if (!in_gc)
                UNLOCK_GC;
#endif
}
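
/*
 * The `track` flag decides when sgen_null_link_in_range () processes the
 * link: untracked links are nulled before finalization handling, tracked
 * links only afterwards, so a tracked link can still observe an object whose
 * finalizer is pending.  In the Mono client this presumably maps to the
 * "track resurrection" mode of weak references, but that mapping is the
 * client's concern.
 */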

void
sgen_init_fin_weak_hash (void)
{
#ifdef HEAVY_STATISTICS
        mono_counters_register ("FinWeak Successes", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_success);
        mono_counters_register ("FinWeak Overflow aborts", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_overflow_abort);
        mono_counters_register ("FinWeak Wait for processing", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wait_for_processing);
        mono_counters_register ("FinWeak Increment other thread", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_increment_other_thread);
        mono_counters_register ("FinWeak Index decremented", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_index_decremented);
        mono_counters_register ("FinWeak Entry invalidated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_entry_invalidated);
#endif
}

#endif /* HAVE_SGEN_GC */