/* mono/metadata/sgen-fin-weak-hash.c */
1 /*
2  * sgen-fin-weak-hash.c: Finalizers and weak links.
3  *
4  * Author:
5  *      Paolo Molaro (lupus@ximian.com)
6  *  Rodrigo Kumpera (kumpera@gmail.com)
7  *
8  * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
9  * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
10  * Copyright 2011 Xamarin, Inc.
11  * Copyright (C) 2012 Xamarin Inc
12  *
13  * This library is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Library General Public
15  * License 2.0 as published by the Free Software Foundation;
16  *
17  * This library is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  * Library General Public License for more details.
21  *
22  * You should have received a copy of the GNU Library General Public
23  * License 2.0 along with this library; if not, write to the Free
24  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25  */
26
27 #include "config.h"
28 #ifdef HAVE_SGEN_GC
29
30 #include "metadata/sgen-gc.h"
31 #include "metadata/sgen-gray.h"
32 #include "metadata/sgen-protocol.h"
33 #include "metadata/sgen-pointer-queue.h"
34 #include "utils/dtrace.h"
35 #include "utils/mono-counters.h"
36
37 #define ptr_in_nursery sgen_ptr_in_nursery
38
39 typedef SgenGrayQueue GrayQueue;
40
/* Number of objects queued and waiting for their finalizer to run
 * (incremented when an object is queued in sgen_finalize_in_range ()). */
int num_ready_finalizers = 0;
/* When non-zero, finalization is disabled entirely. */
static int no_finalize = 0;

/* A weak link slot stores the object pointer hidden (REVEAL_POINTER undoes
 * HIDE_POINTER — see the *link updates below); the low bit of the hidden
 * value encodes whether the link is resurrection-tracking. */
#define DISLINK_OBJECT(l)       (REVEAL_POINTER (*(void**)(l)))
#define DISLINK_TRACK(l)        ((~(size_t)(*(void**)(l))) & 1)
46
47 /*
 * The finalizable hash has the object as the key; the
 * disappearing_link hash has the link address as key.
50  *
51  * Copyright 2011 Xamarin Inc.
52  */
53
54 #define TAG_MASK ((mword)0x1)
55
56 static inline MonoObject*
57 tagged_object_get_object (MonoObject *object)
58 {
59         return (MonoObject*)(((mword)object) & ~TAG_MASK);
60 }
61
62 static inline int
63 tagged_object_get_tag (MonoObject *object)
64 {
65         return ((mword)object) & TAG_MASK;
66 }
67
68 static inline MonoObject*
69 tagged_object_apply (void *object, int tag_bits)
70 {
71        return (MonoObject*)((mword)object | (mword)tag_bits);
72 }
73
74 static int
75 tagged_object_hash (MonoObject *o)
76 {
77         return mono_aligned_addr_hash (tagged_object_get_object (o));
78 }
79
80 static gboolean
81 tagged_object_equals (MonoObject *a, MonoObject *b)
82 {
83         return tagged_object_get_object (a) == tagged_object_get_object (b);
84 }
85
86 static SgenHashTable minor_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals);
87 static SgenHashTable major_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals);
88
89 static SgenHashTable*
90 get_finalize_entry_hash_table (int generation)
91 {
92         switch (generation) {
93         case GENERATION_NURSERY: return &minor_finalizable_hash;
94         case GENERATION_OLD: return &major_finalizable_hash;
95         default: g_assert_not_reached ();
96         }
97 }
98
/* Tag bit stored in the finalizable-hash key when the bridge processor has
 * claimed the object (checked in sgen_collect_bridge_objects ()). */
#define BRIDGE_OBJECT_MARKED 0x1

/* LOCKING: requires that the GC lock is held */
/*
 * Re-key `obj`'s entry in the finalizable hash of whichever generation it
 * lives in, setting the BRIDGE_OBJECT_MARKED tag bit so the bridge
 * collection pass skips it.
 */
void
sgen_mark_bridge_object (MonoObject *obj)
{
	SgenHashTable *hash_table = get_finalize_entry_hash_table (ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD);

	sgen_hash_table_set_key (hash_table, obj, tagged_object_apply (obj, BRIDGE_OBJECT_MARKED));
}
109
110 /* LOCKING: requires that the GC lock is held */
111 void
112 sgen_collect_bridge_objects (int generation, ScanCopyContext ctx)
113 {
114         CopyOrMarkObjectFunc copy_func = ctx.copy_func;
115         GrayQueue *queue = ctx.queue;
116         SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
117         MonoObject *object;
118         gpointer dummy G_GNUC_UNUSED;
119         char *copy;
120         SgenPointerQueue moved_fin_objects;
121
122         sgen_pointer_queue_init (&moved_fin_objects, INTERNAL_MEM_TEMPORARY);
123
124         if (no_finalize)
125                 return;
126
127         SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
128                 int tag = tagged_object_get_tag (object);
129                 object = tagged_object_get_object (object);
130
131                 /* Bridge code told us to ignore this one */
132                 if (tag == BRIDGE_OBJECT_MARKED)
133                         continue;
134
135                 /* Object is a bridge object and major heap says it's dead  */
136                 if (major_collector.is_object_live ((char*)object))
137                         continue;
138
139                 /* Nursery says the object is dead. */
140                 if (!sgen_gc_is_object_ready_for_finalization (object))
141                         continue;
142
143                 if (!sgen_is_bridge_object (object))
144                         continue;
145
146                 copy = (char*)object;
147                 copy_func ((void**)&copy, queue);
148
149                 sgen_bridge_register_finalized_object ((MonoObject*)copy);
150                 
151                 if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
152                         /* remove from the list */
153                         SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
154
155                         /* insert it into the major hash */
156                         sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);
157
158                         SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_safe_name (copy), object);
159
160                         continue;
161                 } else if (copy != (char*)object) {
162                         /* update pointer */
163                         SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
164
165                         /* register for reinsertion */
166                         sgen_pointer_queue_add (&moved_fin_objects, tagged_object_apply (copy, tag));
167
168                         SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_safe_name (copy), object);
169
170                         continue;
171                 }
172         } SGEN_HASH_TABLE_FOREACH_END;
173
174         while (!sgen_pointer_queue_is_empty (&moved_fin_objects)) {
175                 sgen_hash_table_replace (hash_table, sgen_pointer_queue_pop (&moved_fin_objects), NULL, NULL);
176         }
177
178         sgen_pointer_queue_free (&moved_fin_objects);
179 }
180
181
182 /* LOCKING: requires that the GC lock is held */
183 void
184 sgen_finalize_in_range (int generation, ScanCopyContext ctx)
185 {
186         CopyOrMarkObjectFunc copy_func = ctx.copy_func;
187         GrayQueue *queue = ctx.queue;
188         SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
189         MonoObject *object;
190         gpointer dummy G_GNUC_UNUSED;
191         SgenPointerQueue moved_fin_objects;
192
193         sgen_pointer_queue_init (&moved_fin_objects, INTERNAL_MEM_TEMPORARY);
194
195         if (no_finalize)
196                 return;
197         SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
198                 int tag = tagged_object_get_tag (object);
199                 object = tagged_object_get_object (object);
200                 if (!major_collector.is_object_live ((char*)object)) {
201                         gboolean is_fin_ready = sgen_gc_is_object_ready_for_finalization (object);
202                         MonoObject *copy = object;
203                         copy_func ((void**)&copy, queue);
204                         if (is_fin_ready) {
205                                 /* remove and put in fin_ready_list */
206                                 SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
207                                 num_ready_finalizers++;
208                                 sgen_queue_finalization_entry (copy);
209                                 /* Make it survive */
210                                 SGEN_LOG (5, "Queueing object for finalization: %p (%s) (was at %p) (%d/%d)", copy, sgen_safe_name (copy), object, num_ready_finalizers, sgen_hash_table_num_entries (hash_table));
211                                 continue;
212                         } else {
213                                 if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
214                                         /* remove from the list */
215                                         SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
216
217                                         /* insert it into the major hash */
218                                         sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);
219
220                                         SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_safe_name (copy), object);
221
222                                         continue;
223                                 } else if (copy != object) {
224                                         /* update pointer */
225                                         SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
226
227                                         /* register for reinsertion */
228                                         sgen_pointer_queue_add (&moved_fin_objects, tagged_object_apply (copy, tag));
229
230                                         SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_safe_name (copy), object);
231
232                                         continue;
233                                 }
234                         }
235                 }
236         } SGEN_HASH_TABLE_FOREACH_END;
237
238         while (!sgen_pointer_queue_is_empty (&moved_fin_objects)) {
239                 sgen_hash_table_replace (hash_table, sgen_pointer_queue_pop (&moved_fin_objects), NULL, NULL);
240         }
241
242         sgen_pointer_queue_free (&moved_fin_objects);
243 }
244
/* LOCKING: requires that the GC lock is held */
/*
 * Add (user_data == mono_gc_run_finalize) or remove (user_data == NULL) the
 * finalization entry for `obj` in the hash table of `generation`.  No-op
 * while finalization is globally disabled.
 */
static void
register_for_finalization (MonoObject *obj, void *user_data, int generation)
{
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);

	if (no_finalize)
		return;

	/* The only supported callback is the runtime finalizer invoker. */
	g_assert (user_data == NULL || user_data == mono_gc_run_finalize);

	if (user_data) {
		/* Log only when a new entry was actually inserted. */
		if (sgen_hash_table_replace (hash_table, obj, NULL, NULL))
			SGEN_LOG (5, "Added finalizer for object: %p (%s) (%d) to %s table", obj, obj->vtable->klass->name, hash_table->num_entries, sgen_generation_name (generation));
	} else {
		/* Log only when an entry was actually removed. */
		if (sgen_hash_table_remove (hash_table, obj, NULL))
			SGEN_LOG (5, "Removed finalizer for object: %p (%s) (%d)", obj, obj->vtable->klass->name, hash_table->num_entries);
	}
}
264
265 /*
266  * We're using (mostly) non-locking staging queues for finalizers and weak links to speed
267  * up registering them.  Otherwise we'd have to take the GC lock.
268  *
269  * The queues are arrays of `StageEntry`, plus a `next_entry` index.  Threads add entries to
270  * the queue via `add_stage_entry()` in a linear fashion until it fills up, in which case
271  * `process_stage_entries()` is called to drain it.  A garbage collection will also drain
272  * the queues via the same function.  That implies that `add_stage_entry()`, since it
273  * doesn't take a lock, must be able to run concurrently with `process_stage_entries()`,
274  * though it doesn't have to make progress while the queue is drained.  In fact, once it
275  * detects that the queue is being drained, it blocks until the draining is done.
276  *
277  * The protocol must guarantee that entries in the queue are causally ordered, otherwise two
278  * entries for the same location might get switched, resulting in the earlier one being
279  * committed and the later one ignored.
280  *
281  * `next_entry` is the index of the next entry to be filled, or `-1` if the queue is
282  * currently being drained.  Each entry has a state:
283  *
284  * `STAGE_ENTRY_FREE`: The entry is free.  Its data fields must be `NULL`.
285  *
286  * `STAGE_ENTRY_BUSY`: The entry is currently being filled in.
287  *
288  * `STAGE_ENTRY_USED`: The entry is completely filled in and must be processed in the next
289  * draining round.
290  *
291  * `STAGE_ENTRY_INVALID`: The entry was busy during queue draining and therefore
292  * invalidated.  Entries that are `BUSY` can obviously not be processed during a drain, but
293  * we can't leave them in place because new entries might be inserted before them, including
294  * from the same thread, violating causality.  An alternative would be not to reset
295  * `next_entry` to `0` after a drain, but to the index of the last `BUSY` entry plus one,
296  * but that can potentially waste the whole queue.
297  *
298  * State transitions:
299  *
300  * | from    | to      | filler? | drainer? |
301  * +---------+---------+---------+----------+
302  * | FREE    | BUSY    | X       |          |
303  * | BUSY    | FREE    | X       |          |
304  * | BUSY    | USED    | X       |          |
305  * | BUSY    | INVALID |         | X        |
306  * | USED    | FREE    |         | X        |
307  * | INVALID | FREE    | X       |          |
308  *
309  * `next_entry` can be incremented either by the filler thread that set the corresponding
310  * entry to `BUSY`, or by another filler thread that's trying to get a `FREE` slot.  If that
311  * other thread wasn't allowed to increment, it would block on the first filler thread.
312  *
313  * An entry's state, once it's set from `FREE` to `BUSY` by a filler thread, can only be
 * changed by that same thread or by the drainer.  The drainer can only set a `BUSY` entry
 * to `INVALID`, so it needs to be set to `FREE` again by the original filler thread.
316  */
317
/* Entry states — see the protocol description above for the transitions. */
#define STAGE_ENTRY_FREE        0
#define STAGE_ENTRY_BUSY        1
#define STAGE_ENTRY_USED        2
#define STAGE_ENTRY_INVALID     3

/* One slot of a staging queue.  While the state is FREE, `obj` and
 * `user_data` must be NULL (see the protocol description above). */
typedef struct {
        volatile gint32 state;
        MonoObject *obj;
        void *user_data;
} StageEntry;

#define NUM_FIN_STAGE_ENTRIES   1024

/* Index of the next slot to fill, or -1 while the queue is being drained. */
static volatile gint32 next_fin_stage_entry = 0;
static StageEntry fin_stage_entries [NUM_FIN_STAGE_ENTRIES];
333
/*
 * This is used to lock the stage when processing is forced, i.e. when it's triggered by a
 * garbage collection.  In that case, the world is already stopped and there's only one
 * thread operating on the queue.
 */
static void
lock_stage_for_processing (volatile gint32 *next_entry)
{
	/* -1 makes fillers in add_stage_entry () back off until the drain
	 * resets the index to 0. */
	*next_entry = -1;
}
344
/*
 * When processing is triggered by an overflow, we don't want to take the GC lock
 * immediately, and then set `next_index` to `-1`, because another thread might have drained
 * the queue in the mean time.  Instead, we make sure the overflow is still there, we
 * atomically set `next_index`, and only once that happened do we take the GC lock.
 */
static gboolean
try_lock_stage_for_processing (int num_entries, volatile gint32 *next_entry)
{
	gint32 old = *next_entry;
	/* Somebody else already drained (or is draining) the queue. */
	if (old < num_entries)
		return FALSE;
	/* TRUE only if we were the ones to atomically close the queue. */
	return InterlockedCompareExchange (next_entry, -1, old) == old;
}
359
/* LOCKING: requires that the GC lock is held */
/*
 * Drain a staging queue: call `process_func` on every USED entry, invalidate
 * entries still BUSY (their filler thread will reset them and retry), then
 * reopen the queue by resetting `*next_entry` to 0.  Must only run after the
 * queue was closed via lock_stage_for_processing () or
 * try_lock_stage_for_processing ().
 */
static void
process_stage_entries (int num_entries, volatile gint32 *next_entry, StageEntry *entries, void (*process_func) (MonoObject*, void*, int))
{
	int i;

	/*
	 * This can happen if after setting `next_index` to `-1` in
	 * `try_lock_stage_for_processing()`, a GC was triggered, which then drained the
	 * queue and reset `next_entry`.
	 *
	 * We have the GC lock now, so if it's still `-1`, we can't be interrupted by a GC.
	 */
	if (*next_entry != -1)
		return;

	for (i = 0; i < num_entries; ++i) {
		gint32 state;

	retry:
		state = entries [i].state;

		switch (state) {
		case STAGE_ENTRY_FREE:
		case STAGE_ENTRY_INVALID:
			continue;
		case STAGE_ENTRY_BUSY:
			/* BUSY -> INVALID */
			/*
			 * This must be done atomically, because the filler thread can set
			 * the entry to `USED`, in which case we must process it, so we must
			 * detect that eventuality.
			 */
			if (InterlockedCompareExchange (&entries [i].state, STAGE_ENTRY_INVALID, STAGE_ENTRY_BUSY) != STAGE_ENTRY_BUSY)
				goto retry;
			continue;
		case STAGE_ENTRY_USED:
			break;
		default:
			SGEN_ASSERT (0, FALSE, "Invalid stage entry state");
			break;
		}

		/* state is USED */

		process_func (entries [i].obj, entries [i].user_data, i);

		/* FREE entries must have NULL data fields, so clear them before
		 * releasing the slot. */
		entries [i].obj = NULL;
		entries [i].user_data = NULL;

		mono_memory_write_barrier ();

		/* USED -> FREE */
		/*
		 * This transition only happens here, so we don't have to do it atomically.
		 */
		entries [i].state = STAGE_ENTRY_FREE;
	}

	mono_memory_write_barrier ();

	/* Reopen the queue for filler threads. */
	*next_entry = 0;
}
423
#ifdef HEAVY_STATISTICS
/* Counters for the various outcomes inside add_stage_entry (). */
static guint64 stat_overflow_abort = 0;
static guint64 stat_wait_for_processing = 0;
static guint64 stat_increment_other_thread = 0;
static guint64 stat_index_decremented = 0;
static guint64 stat_entry_invalidated = 0;
static guint64 stat_success = 0;
#endif

/*
 * Lock-free filler side of the staging-queue protocol described above:
 * claim a FREE slot, store (obj, user_data) into it and publish it as USED.
 * Returns the slot index on success, or -1 when the queue is full — the
 * caller is then expected to drain the queue and retry.
 */
static int
add_stage_entry (int num_entries, volatile gint32 *next_entry, StageEntry *entries, MonoObject *obj, void *user_data)
{
	gint32 index, new_next_entry, old_next_entry;
	gint32 previous_state;

 retry:
	for (;;) {
		index = *next_entry;
		if (index >= num_entries) {
			/* Queue full — report overflow to the caller. */
			HEAVY_STAT (++stat_overflow_abort);
			return -1;
		}
		if (index < 0) {
			/*
			 * Backed-off waiting is way more efficient than even using a
			 * dedicated lock for this.
			 */
			while ((index = *next_entry) < 0) {
				/*
				 * This seems like a good value.  Determined by timing
				 * sgen-weakref-stress.exe.
				 */
				g_usleep (200);
				HEAVY_STAT (++stat_wait_for_processing);
			}
			continue;
		}
		/* FREE -> BUSY */
		if (entries [index].state != STAGE_ENTRY_FREE ||
				InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_BUSY, STAGE_ENTRY_FREE) != STAGE_ENTRY_FREE) {
			/*
			 * If we can't get the entry it must be because another thread got
			 * it first.  We don't want to wait for that thread to increment
			 * `next_entry`, so we try to do it ourselves.  Whether we succeed
			 * or not, we start over.
			 */
			if (*next_entry == index) {
				InterlockedCompareExchange (next_entry, index + 1, index);
				//g_print ("tried increment for other thread\n");
				HEAVY_STAT (++stat_increment_other_thread);
			}
			continue;
		}
		/* state is BUSY now */
		mono_memory_write_barrier ();
		/*
		 * Incrementing `next_entry` must happen after setting the state to `BUSY`.
		 * If it were the other way around, it would be possible that after a filler
		 * incremented the index, other threads fill up the queue, the queue is
		 * drained, the original filler finally fills in the slot, but `next_entry`
		 * ends up at the start of the queue, and new entries are written in the
		 * queue in front of, not behind, the original filler's entry.
		 *
		 * We don't actually require that the CAS succeeds, but we do require that
		 * the value of `next_entry` is not lower than our index.  Since the drainer
		 * sets it to `-1`, that also takes care of the case that the drainer is
		 * currently running.
		 */
		old_next_entry = InterlockedCompareExchange (next_entry, index + 1, index);
		if (old_next_entry < index) {
			/* BUSY -> FREE */
			/* INVALID -> FREE */
			/*
			 * The state might still be `BUSY`, or the drainer could have set it
			 * to `INVALID`.  In either case, there's no point in CASing.  Set
			 * it to `FREE` and start over.
			 */
			entries [index].state = STAGE_ENTRY_FREE;
			HEAVY_STAT (++stat_index_decremented);
			continue;
		}
		break;
	}

	SGEN_ASSERT (0, index >= 0 && index < num_entries, "Invalid index");

	/* We own the slot now — fill in the data before publishing the state. */
	entries [index].obj = obj;
	entries [index].user_data = user_data;

	mono_memory_write_barrier ();

	new_next_entry = *next_entry;
	mono_memory_read_barrier ();
	/* BUSY -> USED */
	/*
	 * A `BUSY` entry will either still be `BUSY` or the drainer will have set it to
	 * `INVALID`.  In the former case, we set it to `USED` and we're finished.  In the
	 * latter case, we reset it to `FREE` and start over.
	 */
	previous_state = InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_USED, STAGE_ENTRY_BUSY);
	if (previous_state == STAGE_ENTRY_BUSY) {
		SGEN_ASSERT (0, new_next_entry >= index || new_next_entry < 0, "Invalid next entry index - as long as we're busy, other thread can only increment or invalidate it");
		HEAVY_STAT (++stat_success);
		return index;
	}

	SGEN_ASSERT (0, previous_state == STAGE_ENTRY_INVALID, "Invalid state transition - other thread can only make busy state invalid");
	/* The drainer invalidated our slot: clear the data and release it. */
	entries [index].obj = NULL;
	entries [index].user_data = NULL;
	mono_memory_write_barrier ();
	/* INVALID -> FREE */
	entries [index].state = STAGE_ENTRY_FREE;

	HEAVY_STAT (++stat_entry_invalidated);

	goto retry;
}
541
542 /* LOCKING: requires that the GC lock is held */
543 static void
544 process_fin_stage_entry (MonoObject *obj, void *user_data, int index)
545 {
546         if (ptr_in_nursery (obj))
547                 register_for_finalization (obj, user_data, GENERATION_NURSERY);
548         else
549                 register_for_finalization (obj, user_data, GENERATION_OLD);
550 }
551
/* LOCKING: requires that the GC lock is held */
/*
 * Force-drain the finalizer staging queue.  Uses the non-atomic lock
 * variant, which is only valid when a single thread operates on the queue
 * (see the comment on lock_stage_for_processing ()).
 */
void
sgen_process_fin_stage_entries (void)
{
	lock_stage_for_processing (&next_fin_stage_entry);
	process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
}
559
/*
 * Public entry point: stage a finalizer registration (user_data != NULL) or
 * removal (user_data == NULL) for `obj` without taking the GC lock.  If the
 * staging queue overflows, whichever thread wins the race drains it under
 * the GC lock; everyone retries until the entry is staged.
 */
void
mono_gc_register_for_finalization (MonoObject *obj, void *user_data)
{
	while (add_stage_entry (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, obj, user_data) == -1) {
		/* -1 means overflow: try to become the drainer; if another
		 * thread won, just loop until it finishes. */
		if (try_lock_stage_for_processing (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry)) {
			LOCK_GC;
			process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
			UNLOCK_GC;
		}
	}
}
571
/* LOCKING: requires that the GC lock is held */
/*
 * Remove up to `out_size` finalizable objects belonging to `domain` from
 * `hash_table` and store them into `out_array`.  Returns the number stored.
 */
static int
finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size,
	SgenHashTable *hash_table)
{
	MonoObject *object;
	gpointer dummy G_GNUC_UNUSED;
	int count;

	if (no_finalize || !out_size || !out_array)
		return 0;
	count = 0;
	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		/* Keys may carry a tag bit (see tagged_object_apply) — strip it. */
		object = tagged_object_get_object (object);

		if (mono_object_domain (object) == domain) {
			/* remove and put in out_array */
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
			out_array [count ++] = object;
			SGEN_LOG (5, "Collecting object for finalization: %p (%s) (%d/%d)", object, sgen_safe_name (object), num_ready_finalizers, sgen_hash_table_num_entries (hash_table));
			if (count == out_size)
				return count;
			continue;
		}
	} SGEN_HASH_TABLE_FOREACH_END;
	return count;
}
599
600 /**
601  * mono_gc_finalizers_for_domain:
602  * @domain: the unloading appdomain
603  * @out_array: output array
604  * @out_size: size of output array
605  *
606  * Store inside @out_array up to @out_size objects that belong to the unloading
 * appdomain @domain. Returns the number of stored items. Can be called repeatedly
608  * until it returns 0.
609  * The items are removed from the finalizer data structure, so the caller is supposed
610  * to finalize them.
611  * @out_array should be on the stack to allow the GC to know the objects are still alive.
612  */
613 int
614 mono_gc_finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size)
615 {
616         int result;
617
618         LOCK_GC;
619         sgen_process_fin_stage_entries ();
620         result = finalizers_for_domain (domain, out_array, out_size, &minor_finalizable_hash);
621         if (result < out_size) {
622                 result += finalizers_for_domain (domain, out_array + result, out_size - result,
623                         &major_finalizable_hash);
624         }
625         UNLOCK_GC;
626
627         return result;
628 }
629
630 static SgenHashTable minor_disappearing_link_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_DISLINK_TABLE, INTERNAL_MEM_DISLINK, 0, mono_aligned_addr_hash, NULL);
631 static SgenHashTable major_disappearing_link_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_DISLINK_TABLE, INTERNAL_MEM_DISLINK, 0, mono_aligned_addr_hash, NULL);
632
633 static SgenHashTable*
634 get_dislink_hash_table (int generation)
635 {
636         switch (generation) {
637         case GENERATION_NURSERY: return &minor_disappearing_link_hash;
638         case GENERATION_OLD: return &major_disappearing_link_hash;
639         default: g_assert_not_reached ();
640         }
641 }
642
643 /* LOCKING: assumes the GC lock is held */
644 static void
645 add_or_remove_disappearing_link (MonoObject *obj, void **link, int generation)
646 {
647         SgenHashTable *hash_table = get_dislink_hash_table (generation);
648
649         if (!obj) {
650                 if (sgen_hash_table_remove (hash_table, link, NULL)) {
651                         SGEN_LOG (5, "Removed dislink %p (%d) from %s table",
652                                         link, hash_table->num_entries, sgen_generation_name (generation));
653                 }
654                 return;
655         }
656
657         sgen_hash_table_replace (hash_table, link, NULL, NULL);
658         SGEN_LOG (5, "Added dislink for object: %p (%s) at %p to %s table",
659                         obj, obj->vtable->klass->name, link, sgen_generation_name (generation));
660 }
661
662 /* LOCKING: requires that the GC lock is held */
663 void
664 sgen_null_link_in_range (int generation, gboolean before_finalization, ScanCopyContext ctx)
665 {
666         CopyOrMarkObjectFunc copy_func = ctx.copy_func;
667         GrayQueue *queue = ctx.queue;
668         void **link;
669         gpointer dummy G_GNUC_UNUSED;
670         SgenHashTable *hash = get_dislink_hash_table (generation);
671
672         SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
673                 char *object;
674                 gboolean track;
675
676                 /*
677                 We null a weak link before unregistering it, so it's possible that a thread is
678                 suspended right in between setting the content to null and staging the unregister.
679
680                 The rest of this code cannot handle null links as DISLINK_OBJECT (NULL) produces an invalid address.
681
682                 We should simply skip the entry as the staged removal will take place during the next GC.
683                 */
684                 if (!*link) {
685                         SGEN_LOG (5, "Dislink %p was externally nullified", link);
686                         continue;
687                 }
688
689                 track = DISLINK_TRACK (link);
690                 /*
691                  * Tracked references are processed after
692                  * finalization handling whereas standard weak
693                  * references are processed before.  If an
694                  * object is still not marked after finalization
695                  * handling it means that it either doesn't have
696                  * a finalizer or the finalizer has already run,
697                  * so we must null a tracking reference.
698                  */
699                 if (track != before_finalization) {
700                         object = DISLINK_OBJECT (link);
701                         /*
702                         We should guard against a null object been hidden. This can sometimes happen.
703                         */
704                         if (!object) {
705                                 SGEN_LOG (5, "Dislink %p with a hidden null object", link);
706                                 continue;
707                         }
708
709                         if (!major_collector.is_object_live (object)) {
710                                 if (sgen_gc_is_object_ready_for_finalization (object)) {
711                                         *link = NULL;
712                                         binary_protocol_dislink_update (link, NULL, 0, 0);
713                                         SGEN_LOG (5, "Dislink nullified at %p to GCed object %p", link, object);
714                                         SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
715                                         continue;
716                                 } else {
717                                         char *copy = object;
718                                         copy_func ((void**)&copy, queue);
719
720                                         /* Update pointer if it's moved.  If the object
721                                          * has been moved out of the nursery, we need to
722                                          * remove the link from the minor hash table to
723                                          * the major one.
724                                          *
725                                          * FIXME: what if an object is moved earlier?
726                                          */
727
728                                         if (hash == &minor_disappearing_link_hash && !ptr_in_nursery (copy)) {
729                                                 SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
730
731                                                 g_assert (copy);
732                                                 *link = HIDE_POINTER (copy, track);
733                                                 add_or_remove_disappearing_link ((MonoObject*)copy, link, GENERATION_OLD);
734                                                 binary_protocol_dislink_update (link, copy, track, 0);
735
736                                                 SGEN_LOG (5, "Upgraded dislink at %p to major because object %p moved to %p", link, object, copy);
737
738                                                 continue;
739                                         } else {
740                                                 *link = HIDE_POINTER (copy, track);
741                                                 binary_protocol_dislink_update (link, copy, track, 0);
742                                                 SGEN_LOG (5, "Updated dislink at %p to %p", link, DISLINK_OBJECT (link));
743                                         }
744                                 }
745                         }
746                 }
747         } SGEN_HASH_TABLE_FOREACH_END;
748 }
749
750 /* LOCKING: requires that the GC lock is held */
751 void
752 sgen_null_links_for_domain (MonoDomain *domain, int generation)
753 {
754         void **link;
755         gpointer dummy G_GNUC_UNUSED;
756         SgenHashTable *hash = get_dislink_hash_table (generation);
757         SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
758                 char *object = DISLINK_OBJECT (link);
759
760                 if (object)
761                         SGEN_ASSERT (0, ((MonoObject*)object)->vtable, "Can't have objects without vtables.");
762
763                 if (*link && object && ((MonoObject*)object)->vtable->domain == domain) {
764                         *link = NULL;
765                         binary_protocol_dislink_update (link, NULL, 0, 0);
766                         /*
767                          * This can happen if finalizers are not ran, i.e. Environment.Exit ()
768                          * is called from finalizer like in finalizer-abort.cs.
769                          */
770                         SGEN_LOG (5, "Disappearing link %p not freed", link);
771
772                         /*
773                          * FIXME: Why don't we free the entry here?
774                          */
775                         SGEN_HASH_TABLE_FOREACH_REMOVE (FALSE);
776
777                         continue;
778                 }
779         } SGEN_HASH_TABLE_FOREACH_END;
780 }
781
782 /* LOCKING: requires that the GC lock is held */
783 void
784 sgen_null_links_with_predicate (int generation, WeakLinkAlivePredicateFunc predicate, void *data)
785 {
786         void **link;
787         gpointer dummy G_GNUC_UNUSED;
788         SgenHashTable *hash = get_dislink_hash_table (generation);
789         SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
790                 char *object = DISLINK_OBJECT (link);
791                 mono_bool is_alive;
792
793                 if (!*link)
794                         continue;
795                 is_alive = predicate ((MonoObject*)object, data);
796
797                 if (!is_alive) {
798                         *link = NULL;
799                         binary_protocol_dislink_update (link, NULL, 0, 0);
800                         SGEN_LOG (5, "Dislink nullified by predicate at %p to GCed object %p", link, object);
801                         SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
802                         continue;
803                 }
804         } SGEN_HASH_TABLE_FOREACH_END;
805 }
806
807 void
808 sgen_remove_finalizers_for_domain (MonoDomain *domain, int generation)
809 {
810         SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
811         MonoObject *object;
812         gpointer dummy G_GNUC_UNUSED;
813
814         SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
815                 object = tagged_object_get_object (object);
816
817                 if (mono_object_domain (object) == domain) {
818                         SGEN_LOG (5, "Unregistering finalizer for object: %p (%s)", object, sgen_safe_name (object));
819
820                         SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
821                         continue;
822                 }
823         } SGEN_HASH_TABLE_FOREACH_END;  
824 }
825
826 /* LOCKING: requires that the GC lock is held */
827 static void
828 process_dislink_stage_entry (MonoObject *obj, void *_link, int index)
829 {
830         void **link = _link;
831
832         if (index >= 0)
833                 binary_protocol_dislink_process_staged (link, obj, index);
834
835         add_or_remove_disappearing_link (NULL, link, GENERATION_NURSERY);
836         add_or_remove_disappearing_link (NULL, link, GENERATION_OLD);
837         if (obj) {
838                 if (ptr_in_nursery (obj))
839                         add_or_remove_disappearing_link (obj, link, GENERATION_NURSERY);
840                 else
841                         add_or_remove_disappearing_link (obj, link, GENERATION_OLD);
842         }
843 }
844
/* Capacity of the staging area for disappearing-link updates. */
#define NUM_DISLINK_STAGE_ENTRIES	1024

/* Shared cursor into the staging area; used by add_stage_entry and the
 * lock/try-lock processing helpers. */
static volatile gint32 next_dislink_stage_entry = 0;
static StageEntry dislink_stage_entries [NUM_DISLINK_STAGE_ENTRIES];
849
/* LOCKING: requires that the GC lock is held */
void
sgen_process_dislink_stage_entries (void)
{
	/* Take ownership of the staging area, then flush every staged link
	 * registration/removal into the disappearing-link hash tables via
	 * process_dislink_stage_entry. */
	lock_stage_for_processing (&next_dislink_stage_entry);
	process_stage_entries (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, process_dislink_stage_entry);
}
857
/*
 * Register (OBJ non-NULL) or clear (OBJ NULL) the weak link at LINK.
 * TRACK selects track-resurrection semantics, encoded into the hidden
 * pointer stored in the link.  When IN_GC is true the caller already
 * holds the GC lock and the entry is processed immediately; otherwise
 * the update is staged lock-free and flushed later (or here, if the
 * staging area is full and we win the processing lock).
 */
void
sgen_register_disappearing_link (MonoObject *obj, void **link, gboolean track, gboolean in_gc)
{

#ifdef ENABLE_DTRACE
	/* Fire the DTrace weak-update probe with the old and new targets. */
	if (MONO_GC_WEAK_UPDATE_ENABLED ()) {
		MonoVTable *vt = obj ? (MonoVTable*)SGEN_LOAD_VTABLE (obj) : NULL;
		MONO_GC_WEAK_UPDATE ((mword)link,
				*link ? (mword)DISLINK_OBJECT (link) : (mword)0,
				(mword)obj,
				obj ? (mword)sgen_safe_object_get_size (obj) : (mword)0,
				obj ? vt->klass->name_space : NULL,
				obj ? vt->klass->name : NULL,
				track ? 1 : 0);
	}
#endif

	/* Store the object hidden (bit-complemented, track bit in bit 0),
	 * or NULL the link when unregistering. */
	if (obj)
		*link = HIDE_POINTER (obj, track);
	else
		*link = NULL;

#if 1
	if (in_gc) {
		binary_protocol_dislink_update (link, obj, track, 0);
		process_dislink_stage_entry (obj, link, -1);
	} else {
		int index;
		binary_protocol_dislink_update (link, obj, track, 1);
		/* Retry while the staging area is full; whoever wins the
		 * try-lock drains it under the GC lock, making room. */
		while ((index = add_stage_entry (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, obj, link)) == -1) {
			if (try_lock_stage_for_processing (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry)) {
				LOCK_GC;
				process_stage_entries (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, process_dislink_stage_entry);
				UNLOCK_GC;
			}
		}
		binary_protocol_dislink_update_staged (link, obj, track, index);
	}
#else
	/* Disabled alternative: always process immediately under the GC lock
	 * instead of staging — kept for debugging the staging machinery. */
	if (!in_gc)
		LOCK_GC;
	binary_protocol_dislink_update (link, obj, track, 0);
	process_dislink_stage_entry (obj, link, -1);
	if (!in_gc)
		UNLOCK_GC;
#endif
}
905
/*
 * Register this module's performance counters.  With HEAVY_STATISTICS
 * disabled this is a no-op; the stat_* counters are declared elsewhere
 * in this file under the same guard.
 */
void
sgen_init_fin_weak_hash (void)
{
#ifdef HEAVY_STATISTICS
	mono_counters_register ("FinWeak Successes", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_success);
	mono_counters_register ("FinWeak Overflow aborts", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_overflow_abort);
	mono_counters_register ("FinWeak Wait for processing", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wait_for_processing);
	mono_counters_register ("FinWeak Increment other thread", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_increment_other_thread);
	mono_counters_register ("FinWeak Index decremented", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_index_decremented);
	mono_counters_register ("FinWeak Entry invalidated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_entry_invalidated);
#endif
}
918
919 #endif /* HAVE_SGEN_GC */