Merge pull request #823 from DavidKarlas/master
[mono.git] / mono / metadata / sgen-fin-weak-hash.c
1 /*
2  * sgen-fin-weak-hash.c: Finalizers and weak links.
3  *
4  * Author:
5  *      Paolo Molaro (lupus@ximian.com)
6  *  Rodrigo Kumpera (kumpera@gmail.com)
7  *
8  * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
9  * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
10  * Copyright 2011 Xamarin, Inc.
11  * Copyright (C) 2012 Xamarin Inc
12  *
13  * This library is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Library General Public
15  * License 2.0 as published by the Free Software Foundation;
16  *
17  * This library is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  * Library General Public License for more details.
21  *
22  * You should have received a copy of the GNU Library General Public
23  * License 2.0 along with this library; if not, write to the Free
24  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25  */
26
27 #include "config.h"
28 #ifdef HAVE_SGEN_GC
29
30 #include "metadata/sgen-gc.h"
31 #include "metadata/sgen-gray.h"
32 #include "metadata/sgen-protocol.h"
33 #include "utils/dtrace.h"
34 #include "utils/mono-counters.h"
35
36 #define ptr_in_nursery sgen_ptr_in_nursery
37
38 typedef SgenGrayQueue GrayQueue;
39
/* Number of objects currently queued for finalization; updated in
 * sgen_finalize_in_range(), which requires the GC lock. */
int num_ready_finalizers = 0;
/* When non-zero, all finalization processing is disabled. */
static int no_finalize = 0;

/* A dislink slot stores the object pointer in hidden form (see
 * REVEAL_POINTER/HIDE_POINTER); bit 0 of the complemented value is the
 * "track resurrection" flag. */
#define DISLINK_OBJECT(l)       (REVEAL_POINTER (*(void**)(l)))
#define DISLINK_TRACK(l)        ((~(gulong)(*(void**)(l))) & 1)
45
 46 /*
 47  * The finalizable hash has the object as the key; the
 48  * disappearing_link hash has the link address as the key.
 49  *
 50  * Copyright 2011 Xamarin Inc.
 51  */
52
53 #define TAG_MASK ((mword)0x1)
54
55 static inline MonoObject*
56 tagged_object_get_object (MonoObject *object)
57 {
58         return (MonoObject*)(((mword)object) & ~TAG_MASK);
59 }
60
61 static inline int
62 tagged_object_get_tag (MonoObject *object)
63 {
64         return ((mword)object) & TAG_MASK;
65 }
66
67 static inline MonoObject*
68 tagged_object_apply (void *object, int tag_bits)
69 {
70        return (MonoObject*)((mword)object | (mword)tag_bits);
71 }
72
73 static int
74 tagged_object_hash (MonoObject *o)
75 {
76         return mono_object_hash (tagged_object_get_object (o));
77 }
78
79 static gboolean
80 tagged_object_equals (MonoObject *a, MonoObject *b)
81 {
82         return tagged_object_get_object (a) == tagged_object_get_object (b);
83 }
84
/* Per-generation tables of registered finalizable objects; keys are tagged
 * object pointers (see TAG_MASK). */
static SgenHashTable minor_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals);
static SgenHashTable major_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals);
87
88 static SgenHashTable*
89 get_finalize_entry_hash_table (int generation)
90 {
91         switch (generation) {
92         case GENERATION_NURSERY: return &minor_finalizable_hash;
93         case GENERATION_OLD: return &major_finalizable_hash;
94         default: g_assert_not_reached ();
95         }
96 }
97
98 #define BRIDGE_OBJECT_MARKED 0x1
99
100 /* LOCKING: requires that the GC lock is held */
101 void
102 sgen_mark_bridge_object (MonoObject *obj)
103 {
104         SgenHashTable *hash_table = get_finalize_entry_hash_table (ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD);
105
106         sgen_hash_table_set_key (hash_table, obj, tagged_object_apply (obj, BRIDGE_OBJECT_MARKED));
107 }
108
/* LOCKING: requires that the GC lock is held */
/*
 * Walk the finalizable hash for `generation` and, for every dead bridge
 * object that the bridge code has not told us to ignore, keep it alive via
 * `ctx.copy_func` and hand it to sgen_bridge_register_finalized_object().
 * Entries whose object was promoted out of the nursery are moved from the
 * minor to the major finalizable hash; otherwise the key is updated in place.
 */
void
sgen_collect_bridge_objects (int generation, ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.copy_func;
	GrayQueue *queue = ctx.queue;
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
	MonoObject *object;
	gpointer dummy;
	char *copy;

	if (no_finalize)
		return;

	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		/* Keys carry a tag bit (see TAG_MASK); split it off before using the pointer. */
		int tag = tagged_object_get_tag (object);
		object = tagged_object_get_object (object);

		/* Bridge code told us to ignore this one */
		if (tag == BRIDGE_OBJECT_MARKED)
			continue;

		/* Object is a bridge object and major heap says it's dead  */
		if (major_collector.is_object_live ((char*)object))
			continue;

		/* Nursery says the object is dead. */
		if (!sgen_gc_is_object_ready_for_finalization (object))
			continue;

		if (!sgen_is_bridge_object (object))
			continue;

		/* Make the object survive and learn its (possibly moved) address. */
		copy = (char*)object;
		copy_func ((void**)&copy, queue);

		sgen_bridge_register_finalized_object ((MonoObject*)copy);

		if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
			/* remove from the list */
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

			/* insert it into the major hash */
			sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);

			SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_safe_name (copy), object);

			continue;
		} else {
			/* update pointer */
			SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_safe_name (copy), object);
			SGEN_HASH_TABLE_FOREACH_SET_KEY (tagged_object_apply (copy, tag));
		}
	} SGEN_HASH_TABLE_FOREACH_END;
}
164
165
/* LOCKING: requires that the GC lock is held */
/*
 * Process the finalizable hash for `generation` after a collection.  Dead
 * objects that are ready for finalization are removed from the table, kept
 * alive via `ctx.copy_func` and queued for the finalizer thread.  Objects
 * that are not ready get their entry updated to the new address, migrating
 * from the minor to the major table when promoted out of the nursery.
 */
void
sgen_finalize_in_range (int generation, ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.copy_func;
	GrayQueue *queue = ctx.queue;
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
	MonoObject *object;
	gpointer dummy;

	if (no_finalize)
		return;
	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		/* Keys carry a tag bit (see TAG_MASK); preserve it across key updates. */
		int tag = tagged_object_get_tag (object);
		object = tagged_object_get_object (object);
		if (!major_collector.is_object_live ((char*)object)) {
			gboolean is_fin_ready = sgen_gc_is_object_ready_for_finalization (object);
			MonoObject *copy = object;
			copy_func ((void**)&copy, queue);
			if (is_fin_ready) {
				/* remove and put in fin_ready_list */
				SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
				num_ready_finalizers++;
				sgen_queue_finalization_entry (copy);
				/* Make it survive */
				SGEN_LOG (5, "Queueing object for finalization: %p (%s) (was at %p) (%d/%d)", copy, sgen_safe_name (copy), object, num_ready_finalizers, sgen_hash_table_num_entries (hash_table));
				continue;
			} else {
				if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
					/* remove from the list */
					SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

					/* insert it into the major hash */
					sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);

					SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_safe_name (copy), object);

					continue;
				} else {
					/* update pointer */
					SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_safe_name (copy), object);
					SGEN_HASH_TABLE_FOREACH_SET_KEY (tagged_object_apply (copy, tag));
				}
			}
		}
	} SGEN_HASH_TABLE_FOREACH_END;
}
213
214 /* LOCKING: requires that the GC lock is held */
215 static void
216 register_for_finalization (MonoObject *obj, void *user_data, int generation)
217 {
218         SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
219
220         if (no_finalize)
221                 return;
222
223         g_assert (user_data == NULL || user_data == mono_gc_run_finalize);
224
225         if (user_data) {
226                 if (sgen_hash_table_replace (hash_table, obj, NULL, NULL))
227                         SGEN_LOG (5, "Added finalizer for object: %p (%s) (%d) to %s table", obj, obj->vtable->klass->name, hash_table->num_entries, sgen_generation_name (generation));
228         } else {
229                 if (sgen_hash_table_remove (hash_table, obj, NULL))
230                         SGEN_LOG (5, "Removed finalizer for object: %p (%s) (%d)", obj, obj->vtable->klass->name, hash_table->num_entries);
231         }
232 }
233
234 /*
235  * We're using (mostly) non-locking staging queues for finalizers and weak links to speed
236  * up registering them.  Otherwise we'd have to take the GC lock.
237  *
238  * The queues are arrays of `StageEntry`, plus a `next_entry` index.  Threads add entries to
239  * the queue via `add_stage_entry()` in a linear fashion until it fills up, in which case
240  * `process_stage_entries()` is called to drain it.  A garbage collection will also drain
241  * the queues via the same function.  That implies that `add_stage_entry()`, since it
242  * doesn't take a lock, must be able to run concurrently with `process_stage_entries()`,
243  * though it doesn't have to make progress while the queue is drained.  In fact, once it
244  * detects that the queue is being drained, it blocks until the draining is done.
245  *
246  * The protocol must guarantee that entries in the queue are causally ordered, otherwise two
247  * entries for the same location might get switched, resulting in the earlier one being
248  * committed and the later one ignored.
249  *
250  * `next_entry` is the index of the next entry to be filled, or `-1` if the queue is
251  * currently being drained.  Each entry has a state:
252  *
253  * `STAGE_ENTRY_FREE`: The entry is free.  Its data fields must be `NULL`.
254  *
255  * `STAGE_ENTRY_BUSY`: The entry is currently being filled in.
256  *
257  * `STAGE_ENTRY_USED`: The entry is completely filled in and must be processed in the next
258  * draining round.
259  *
260  * `STAGE_ENTRY_INVALID`: The entry was busy during queue draining and therefore
261  * invalidated.  Entries that are `BUSY` can obviously not be processed during a drain, but
262  * we can't leave them in place because new entries might be inserted before them, including
263  * from the same thread, violating causality.  An alternative would be not to reset
264  * `next_entry` to `0` after a drain, but to the index of the last `BUSY` entry plus one,
265  * but that can potentially waste the whole queue.
266  *
267  * State transitions:
268  *
269  * | from    | to      | filler? | drainer? |
270  * +---------+---------+---------+----------+
271  * | FREE    | BUSY    | X       |          |
272  * | BUSY    | FREE    | X       |          |
273  * | BUSY    | USED    | X       |          |
274  * | BUSY    | INVALID |         | X        |
275  * | USED    | FREE    |         | X        |
276  * | INVALID | FREE    | X       |          |
277  *
278  * `next_entry` can be incremented either by the filler thread that set the corresponding
279  * entry to `BUSY`, or by another filler thread that's trying to get a `FREE` slot.  If that
280  * other thread wasn't allowed to increment, it would block on the first filler thread.
281  *
 282  * An entry's state, once it's set from `FREE` to `BUSY` by a filler thread, can only be
 283  * changed by that same thread or by the drainer.  The drainer can only set a `BUSY` entry
 284  * to `INVALID`, so it needs to be set to `FREE` again by the original filler thread.
285  */
286
/* Stage-entry states; see the protocol description above. */
#define STAGE_ENTRY_FREE        0
#define STAGE_ENTRY_BUSY        1
#define STAGE_ENTRY_USED        2
#define STAGE_ENTRY_INVALID     3

/* One slot in a staging queue.  `state` is manipulated with atomic
 * compare-and-swap; `obj`/`user_data` are only valid while `state` is USED. */
typedef struct {
	volatile gint32 state;
	MonoObject *obj;
	void *user_data;
} StageEntry;

#define NUM_FIN_STAGE_ENTRIES   1024

/* Staging queue for finalizer (de)registrations.  `next_fin_stage_entry` is
 * the next free slot index, or -1 while the queue is being drained. */
static volatile gint32 next_fin_stage_entry = 0;
static StageEntry fin_stage_entries [NUM_FIN_STAGE_ENTRIES];
302
303 /*
304  * This is used to lock the stage when processing is forced, i.e. when it's triggered by a
305  * garbage collection.  In that case, the world is already stopped and there's only one
306  * thread operating on the queue.
307  */
/*
 * This is used to lock the stage when processing is forced, i.e. when it's triggered by a
 * garbage collection.  In that case, the world is already stopped and there's only one
 * thread operating on the queue.
 */
static void
lock_stage_for_processing (volatile gint32 *next_entry)
{
	/* -1 makes add_stage_entry() back off until draining is finished. */
	*next_entry = -1;
}
313
314 /*
315  * When processing is triggered by an overflow, we don't want to take the GC lock
316  * immediately, and then set `next_index` to `-1`, because another thread might have drained
317  * the queue in the mean time.  Instead, we make sure the overflow is still there, we
318  * atomically set `next_index`, and only once that happened do we take the GC lock.
319  */
/*
 * When processing is triggered by an overflow, we don't want to take the GC lock
 * immediately, and then set `next_index` to `-1`, because another thread might have drained
 * the queue in the mean time.  Instead, we make sure the overflow is still there, we
 * atomically set `next_index`, and only once that happened do we take the GC lock.
 *
 * Returns TRUE if we won the race and may drain the queue; FALSE if another
 * thread already drained it (or is draining it right now).
 */
static gboolean
try_lock_stage_for_processing (int num_entries, volatile gint32 *next_entry)
{
	gint32 old = *next_entry;
	/* Overflow gone (or queue already locked at -1): nothing for us to do. */
	if (old < num_entries)
		return FALSE;
	return InterlockedCompareExchange (next_entry, -1, old) == old;
}
328
/* LOCKING: requires that the GC lock is held */
/*
 * Drain a staging queue: run `process_func` on every USED entry, invalidate
 * BUSY entries (their filler thread will notice and retry), then reopen the
 * queue by resetting `*next_entry` to 0.  Does nothing unless `*next_entry`
 * is -1, i.e. unless the queue was locked by one of the functions above.
 */
static void
process_stage_entries (int num_entries, volatile gint32 *next_entry, StageEntry *entries, void (*process_func) (MonoObject*, void*, int))
{
	int i;

	/*
	 * This can happen if after setting `next_index` to `-1` in
	 * `try_lock_stage_for_processing()`, a GC was triggered, which then drained the
	 * queue and reset `next_entry`.
	 *
	 * We have the GC lock now, so if it's still `-1`, we can't be interrupted by a GC.
	 */
	if (*next_entry != -1)
		return;

	for (i = 0; i < num_entries; ++i) {
		gint32 state;

	retry:
		state = entries [i].state;

		switch (state) {
		case STAGE_ENTRY_FREE:
		case STAGE_ENTRY_INVALID:
			continue;
		case STAGE_ENTRY_BUSY:
			/* BUSY -> INVALID */
			/*
			 * This must be done atomically, because the filler thread can set
			 * the entry to `USED`, in which case we must process it, so we must
			 * detect that eventuality.
			 */
			if (InterlockedCompareExchange (&entries [i].state, STAGE_ENTRY_INVALID, STAGE_ENTRY_BUSY) != STAGE_ENTRY_BUSY)
				goto retry;
			continue;
		case STAGE_ENTRY_USED:
			break;
		default:
			SGEN_ASSERT (0, FALSE, "Invalid stage entry state");
			break;
		}

		/* state is USED */

		process_func (entries [i].obj, entries [i].user_data, i);

		/* Clear the data before freeing the slot: FREE entries must have NULL data. */
		entries [i].obj = NULL;
		entries [i].user_data = NULL;

		mono_memory_write_barrier ();

		/* USED -> FREE */
		/*
		 * This transition only happens here, so we don't have to do it atomically.
		 */
		entries [i].state = STAGE_ENTRY_FREE;
	}

	mono_memory_write_barrier ();

	/* Reopen the queue for filler threads. */
	*next_entry = 0;
}
392
#ifdef HEAVY_STATISTICS
/* Event counters for the staging-queue protocol (see add_stage_entry). */
static long long stat_overflow_abort = 0;
static long long stat_wait_for_processing = 0;
static long long stat_increment_other_thread = 0;
static long long stat_index_decremented = 0;
static long long stat_entry_invalidated = 0;
static long long stat_success = 0;
#endif
401
/*
 * Try to stage `obj`/`user_data` in the queue without taking the GC lock.
 * Returns the slot index used on success, or -1 on overflow, in which case
 * the caller is expected to drain the queue and retry.  See the protocol
 * description above for the entry state machine this implements.
 */
static int
add_stage_entry (int num_entries, volatile gint32 *next_entry, StageEntry *entries, MonoObject *obj, void *user_data)
{
	gint32 index, new_next_entry, old_next_entry;
	gint32 previous_state;

 retry:
	for (;;) {
		index = *next_entry;
		if (index >= num_entries) {
			/* Queue full: let the caller drain it. */
			HEAVY_STAT (++stat_overflow_abort);
			return -1;
		}
		if (index < 0) {
			/*
			 * Backed-off waiting is way more efficient than even using a
			 * dedicated lock for this.
			 */
			while ((index = *next_entry) < 0) {
				/*
				 * This seems like a good value.  Determined by timing
				 * sgen-weakref-stress.exe.
				 */
				g_usleep (200);
				HEAVY_STAT (++stat_wait_for_processing);
			}
			continue;
		}
		/* FREE -> BUSY */
		if (entries [index].state != STAGE_ENTRY_FREE ||
				InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_BUSY, STAGE_ENTRY_FREE) != STAGE_ENTRY_FREE) {
			/*
			 * If we can't get the entry it must be because another thread got
			 * it first.  We don't want to wait for that thread to increment
			 * `next_entry`, so we try to do it ourselves.  Whether we succeed
			 * or not, we start over.
			 */
			if (*next_entry == index) {
				InterlockedCompareExchange (next_entry, index + 1, index);
				//g_print ("tried increment for other thread\n");
				HEAVY_STAT (++stat_increment_other_thread);
			}
			continue;
		}
		/* state is BUSY now */
		mono_memory_write_barrier ();
		/*
		 * Incrementing `next_entry` must happen after setting the state to `BUSY`.
		 * If it were the other way around, it would be possible that after a filler
		 * incremented the index, other threads fill up the queue, the queue is
		 * drained, the original filler finally fills in the slot, but `next_entry`
		 * ends up at the start of the queue, and new entries are written in the
		 * queue in front of, not behind, the original filler's entry.
		 *
		 * We don't actually require that the CAS succeeds, but we do require that
		 * the value of `next_entry` is not lower than our index.  Since the drainer
		 * sets it to `-1`, that also takes care of the case that the drainer is
		 * currently running.
		 */
		old_next_entry = InterlockedCompareExchange (next_entry, index + 1, index);
		if (old_next_entry < index) {
			/* BUSY -> FREE */
			/* INVALID -> FREE */
			/*
			 * The state might still be `BUSY`, or the drainer could have set it
			 * to `INVALID`.  In either case, there's no point in CASing.  Set
			 * it to `FREE` and start over.
			 */
			entries [index].state = STAGE_ENTRY_FREE;
			HEAVY_STAT (++stat_index_decremented);
			continue;
		}
		break;
	}

	SGEN_ASSERT (0, index >= 0 && index < num_entries, "Invalid index");

	/* We own the slot now; fill in the payload before publishing it. */
	entries [index].obj = obj;
	entries [index].user_data = user_data;

	mono_memory_write_barrier ();

	new_next_entry = *next_entry;
	mono_memory_read_barrier ();
	/* BUSY -> USED */
	/*
	 * A `BUSY` entry will either still be `BUSY` or the drainer will have set it to
	 * `INVALID`.  In the former case, we set it to `USED` and we're finished.  In the
	 * latter case, we reset it to `FREE` and start over.
	 */
	previous_state = InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_USED, STAGE_ENTRY_BUSY);
	if (previous_state == STAGE_ENTRY_BUSY) {
		SGEN_ASSERT (0, new_next_entry >= index || new_next_entry < 0, "Invalid next entry index - as long as we're busy, other thread can only increment or invalidate it");
		HEAVY_STAT (++stat_success);
		return index;
	}

	SGEN_ASSERT (0, previous_state == STAGE_ENTRY_INVALID, "Invalid state transition - other thread can only make busy state invalid");
	entries [index].obj = NULL;
	entries [index].user_data = NULL;
	mono_memory_write_barrier ();
	/* INVALID -> FREE */
	entries [index].state = STAGE_ENTRY_FREE;

	HEAVY_STAT (++stat_entry_invalidated);

	goto retry;
}
510
511 /* LOCKING: requires that the GC lock is held */
512 static void
513 process_fin_stage_entry (MonoObject *obj, void *user_data, int index)
514 {
515         if (ptr_in_nursery (obj))
516                 register_for_finalization (obj, user_data, GENERATION_NURSERY);
517         else
518                 register_for_finalization (obj, user_data, GENERATION_OLD);
519 }
520
/* LOCKING: requires that the GC lock is held */
/*
 * Force-drain the finalizer staging queue so the finalizable hashes are up to
 * date (the world is assumed quiescent; see lock_stage_for_processing).
 */
void
sgen_process_fin_stage_entries (void)
{
	lock_stage_for_processing (&next_fin_stage_entry);
	process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
}
528
/*
 * Public entry point for registering (user_data != NULL) or unregistering
 * (user_data == NULL) a finalizer for `obj` -- see register_for_finalization.
 * Stages the request without taking the GC lock; when the staging queue
 * overflows, whichever thread wins try_lock_stage_for_processing drains it
 * under the GC lock, and everyone retries.
 */
void
mono_gc_register_for_finalization (MonoObject *obj, void *user_data)
{
	while (add_stage_entry (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, obj, user_data) == -1) {
		if (try_lock_stage_for_processing (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry)) {
			LOCK_GC;
			process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
			UNLOCK_GC;
		}
	}
}
540
/* LOCKING: requires that the GC lock is held */
/*
 * Remove up to `out_size` finalizable objects belonging to `domain` from
 * `hash_table` and store them in `out_array`.  Returns the number stored;
 * returns 0 when finalization is disabled or the output buffer is empty/NULL.
 */
static int
finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size,
	SgenHashTable *hash_table)
{
	MonoObject *object;
	gpointer dummy;
	int count;

	if (no_finalize || !out_size || !out_array)
		return 0;
	count = 0;
	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		/* Keys are tagged pointers; we only need the object here. */
		object = tagged_object_get_object (object);

		if (mono_object_domain (object) == domain) {
			/* remove and put in out_array */
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
			out_array [count ++] = object;
			SGEN_LOG (5, "Collecting object for finalization: %p (%s) (%d/%d)", object, sgen_safe_name (object), num_ready_finalizers, sgen_hash_table_num_entries (hash_table));
			if (count == out_size)
				return count;
			continue;
		}
	} SGEN_HASH_TABLE_FOREACH_END;
	return count;
}
568
569 /**
570  * mono_gc_finalizers_for_domain:
571  * @domain: the unloading appdomain
572  * @out_array: output array
573  * @out_size: size of output array
574  *
575  * Store inside @out_array up to @out_size objects that belong to the unloading
576  * appdomain @domain. Returns the number of stored items. Can be called repeteadly
577  * until it returns 0.
578  * The items are removed from the finalizer data structure, so the caller is supposed
579  * to finalize them.
580  * @out_array should be on the stack to allow the GC to know the objects are still alive.
581  */
582 int
583 mono_gc_finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size)
584 {
585         int result;
586
587         LOCK_GC;
588         sgen_process_fin_stage_entries ();
589         result = finalizers_for_domain (domain, out_array, out_size, &minor_finalizable_hash);
590         if (result < out_size) {
591                 result += finalizers_for_domain (domain, out_array + result, out_size - result,
592                         &major_finalizable_hash);
593         }
594         UNLOCK_GC;
595
596         return result;
597 }
598
/* Per-generation tables of registered weak links; keys are the link (slot) addresses. */
static SgenHashTable minor_disappearing_link_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_DISLINK_TABLE, INTERNAL_MEM_DISLINK, 0, mono_aligned_addr_hash, NULL);
static SgenHashTable major_disappearing_link_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_DISLINK_TABLE, INTERNAL_MEM_DISLINK, 0, mono_aligned_addr_hash, NULL);
601
602 static SgenHashTable*
603 get_dislink_hash_table (int generation)
604 {
605         switch (generation) {
606         case GENERATION_NURSERY: return &minor_disappearing_link_hash;
607         case GENERATION_OLD: return &major_disappearing_link_hash;
608         default: g_assert_not_reached ();
609         }
610 }
611
612 /* LOCKING: assumes the GC lock is held */
613 static void
614 add_or_remove_disappearing_link (MonoObject *obj, void **link, int generation)
615 {
616         SgenHashTable *hash_table = get_dislink_hash_table (generation);
617
618         if (!obj) {
619                 if (sgen_hash_table_remove (hash_table, link, NULL)) {
620                         SGEN_LOG (5, "Removed dislink %p (%d) from %s table",
621                                         link, hash_table->num_entries, sgen_generation_name (generation));
622                 }
623                 return;
624         }
625
626         sgen_hash_table_replace (hash_table, link, NULL, NULL);
627         SGEN_LOG (5, "Added dislink for object: %p (%s) at %p to %s table",
628                         obj, obj->vtable->klass->name, link, sgen_generation_name (generation));
629 }
630
/* LOCKING: requires that the GC lock is held */
/*
 * Null out or update the weak links in `generation`'s dislink hash after a
 * collection.  Links to objects that are ready for finalization are set to
 * NULL and removed; links to surviving objects are updated to the (possibly
 * moved) address, migrating from the minor to the major hash when the object
 * left the nursery.  Tracking references are handled in the pass where
 * `before_finalization` is FALSE, non-tracking ones in the TRUE pass (see the
 * comment on `track` below).
 */
void
sgen_null_link_in_range (int generation, gboolean before_finalization, ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.copy_func;
	GrayQueue *queue = ctx.queue;
	void **link;
	gpointer dummy;
	SgenHashTable *hash = get_dislink_hash_table (generation);

	SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
		char *object;
		gboolean track;

		/*
		We null a weak link before unregistering it, so it's possible that a thread is
		suspended right in between setting the content to null and staging the unregister.

		The rest of this code cannot handle null links as DISLINK_OBJECT (NULL) produces an invalid address.

		We should simply skip the entry as the staged removal will take place during the next GC.
		*/
		if (!*link) {
			SGEN_LOG (5, "Dislink %p was externally nullified", link);
			continue;
		}

		track = DISLINK_TRACK (link);
		/*
		 * Tracked references are processed after
		 * finalization handling whereas standard weak
		 * references are processed before.  If an
		 * object is still not marked after finalization
		 * handling it means that it either doesn't have
		 * a finalizer or the finalizer has already run,
		 * so we must null a tracking reference.
		 */
		if (track != before_finalization) {
			object = DISLINK_OBJECT (link);
			/*
			We should guard against a null object been hidden. This can sometimes happen.
			*/
			if (!object) {
				SGEN_LOG (5, "Dislink %p with a hidden null object", link);
				continue;
			}

			if (!major_collector.is_object_live (object)) {
				if (sgen_gc_is_object_ready_for_finalization (object)) {
					/* Object is dead: null the link and drop the entry. */
					*link = NULL;
					binary_protocol_dislink_update (link, NULL, 0, 0);
					SGEN_LOG (5, "Dislink nullified at %p to GCed object %p", link, object);
					SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
					continue;
				} else {
					char *copy = object;
					copy_func ((void**)&copy, queue);

					/* Update pointer if it's moved.  If the object
					 * has been moved out of the nursery, we need to
					 * remove the link from the minor hash table to
					 * the major one.
					 *
					 * FIXME: what if an object is moved earlier?
					 */

					if (hash == &minor_disappearing_link_hash && !ptr_in_nursery (copy)) {
						SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

						g_assert (copy);
						*link = HIDE_POINTER (copy, track);
						add_or_remove_disappearing_link ((MonoObject*)copy, link, GENERATION_OLD);
						binary_protocol_dislink_update (link, copy, track, 0);

						SGEN_LOG (5, "Upgraded dislink at %p to major because object %p moved to %p", link, object, copy);

						continue;
					} else {
						*link = HIDE_POINTER (copy, track);
						binary_protocol_dislink_update (link, copy, track, 0);
						SGEN_LOG (5, "Updated dislink at %p to %p", link, DISLINK_OBJECT (link));
					}
				}
			}
		}
	} SGEN_HASH_TABLE_FOREACH_END;
}
718
/* LOCKING: requires that the GC lock is held */
/*
 * Null out disappearing links whose target object has a NULL vtable,
 * i.e. objects being torn down during appdomain unload.
 *
 * NOTE(review): the `domain` parameter is not read in this body; the
 * object is identified as dying purely by its zeroed vtable.  Presumably
 * vtables are only zeroed for objects of the domain being unloaded —
 * confirm against the domain-unload path.
 *
 * NOTE(review): the outer condition already requires a non-NULL *link,
 * so the inner `if (*link)` always takes the true branch and `free` is
 * always FALSE when SGEN_HASH_TABLE_FOREACH_REMOVE runs — the entry is
 * removed from the table but its memory is not freed (see the log
 * message below for why that is deliberate).
 */
void
sgen_null_links_for_domain (MonoDomain *domain, int generation)
{
	void **link;
	gpointer dummy;
	SgenHashTable *hash = get_dislink_hash_table (generation);
	SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
		char *object = DISLINK_OBJECT (link);
		if (*link && object && !((MonoObject*)object)->vtable) {
			gboolean free = TRUE;

			if (*link) {
				*link = NULL;
				binary_protocol_dislink_update (link, NULL, 0, 0);
				free = FALSE;
				/*
				 * This can happen if finalizers are not ran, i.e. Environment.Exit ()
				 * is called from finalizer like in finalizer-abort.cs.
				 */
				SGEN_LOG (5, "Disappearing link %p not freed", link);
			}

			SGEN_HASH_TABLE_FOREACH_REMOVE (free);

			continue;
		}
	} SGEN_HASH_TABLE_FOREACH_END;
}
748
749 /* LOCKING: requires that the GC lock is held */
750 void
751 sgen_null_links_with_predicate (int generation, WeakLinkAlivePredicateFunc predicate, void *data)
752 {
753         void **link;
754         gpointer dummy;
755         SgenHashTable *hash = get_dislink_hash_table (generation);
756         SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
757                 char *object = DISLINK_OBJECT (link);
758                 mono_bool is_alive;
759
760                 if (!*link)
761                         continue;
762                 is_alive = predicate ((MonoObject*)object, data);
763
764                 if (!is_alive) {
765                         *link = NULL;
766                         binary_protocol_dislink_update (link, NULL, 0, 0);
767                         SGEN_LOG (5, "Dislink nullified by predicate at %p to GCed object %p", link, object);
768                         SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
769                         continue;
770                 }
771         } SGEN_HASH_TABLE_FOREACH_END;
772 }
773
774 void
775 sgen_remove_finalizers_for_domain (MonoDomain *domain, int generation)
776 {
777         SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
778         MonoObject *object;
779         gpointer dummy;
780
781         SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
782                 object = tagged_object_get_object (object);
783
784                 if (mono_object_domain (object) == domain) {
785                         SGEN_LOG (5, "Unregistering finalizer for object: %p (%s)", object, sgen_safe_name (object));
786
787                         SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
788                         continue;
789                 }
790         } SGEN_HASH_TABLE_FOREACH_END;  
791 }
792
793 /* LOCKING: requires that the GC lock is held */
794 static void
795 process_dislink_stage_entry (MonoObject *obj, void *_link, int index)
796 {
797         void **link = _link;
798
799         if (index >= 0)
800                 binary_protocol_dislink_process_staged (link, obj, index);
801
802         add_or_remove_disappearing_link (NULL, link, GENERATION_NURSERY);
803         add_or_remove_disappearing_link (NULL, link, GENERATION_OLD);
804         if (obj) {
805                 if (ptr_in_nursery (obj))
806                         add_or_remove_disappearing_link (obj, link, GENERATION_NURSERY);
807                 else
808                         add_or_remove_disappearing_link (obj, link, GENERATION_OLD);
809         }
810 }
811
/* Capacity of the staging buffer for dislink registrations. */
#define NUM_DISLINK_STAGE_ENTRIES	1024

/*
 * Next free slot in dislink_stage_entries.
 * NOTE(review): presumably updated with atomic operations by
 * add_stage_entry/lock_stage_for_processing (defined elsewhere in this
 * file) so registrations can be staged without taking the GC lock —
 * confirm against those helpers.
 */
static volatile gint32 next_dislink_stage_entry = 0;
static StageEntry dislink_stage_entries [NUM_DISLINK_STAGE_ENTRIES];
816
/* LOCKING: requires that the GC lock is held */
/*
 * Drain the dislink staging buffer: lock the staging index against
 * concurrent registrations, then apply each staged entry via
 * process_dislink_stage_entry.
 */
void
sgen_process_dislink_stage_entries (void)
{
	lock_stage_for_processing (&next_dislink_stage_entry);
	process_stage_entries (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, process_dislink_stage_entry);
}
824
/*
 * Register (obj != NULL) or unregister (obj == NULL) a disappearing
 * link at address `link`, optionally tracking resurrection (`track`).
 *
 * When called from within the collector (`in_gc`), the GC lock is
 * already held and the entry is processed immediately.  Otherwise the
 * entry is pushed into the lock-free staging buffer; if the buffer is
 * full, this thread attempts to acquire the GC lock and drain it, then
 * retries.
 */
void
sgen_register_disappearing_link (MonoObject *obj, void **link, gboolean track, gboolean in_gc)
{

#ifdef ENABLE_DTRACE
	/* Fire the weak-update probe with both the old and new targets. */
	if (MONO_GC_WEAK_UPDATE_ENABLED ()) {
		MonoVTable *vt = obj ? (MonoVTable*)SGEN_LOAD_VTABLE (obj) : NULL;
		MONO_GC_WEAK_UPDATE ((mword)link,
				*link ? (mword)DISLINK_OBJECT (link) : (mword)0,
				(mword)obj,
				obj ? (mword)sgen_safe_object_get_size (obj) : (mword)0,
				obj ? vt->klass->name_space : NULL,
				obj ? vt->klass->name : NULL,
				track ? 1 : 0);
	}
#endif

	/* Store the hidden (pointer-tagged) object, or clear the link. */
	if (obj)
		*link = HIDE_POINTER (obj, track);
	else
		*link = NULL;

#if 1
	if (in_gc) {
		/* GC lock already held: update the hash tables directly. */
		binary_protocol_dislink_update (link, obj, track, 0);
		process_dislink_stage_entry (obj, link, -1);
	} else {
		int index;
		binary_protocol_dislink_update (link, obj, track, 1);
		/* Stage the entry; on a full buffer (-1), try to become the
		 * thread that drains it under the GC lock, then retry. */
		while ((index = add_stage_entry (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, obj, link)) == -1) {
			if (try_lock_stage_for_processing (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry)) {
				LOCK_GC;
				process_stage_entries (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, process_dislink_stage_entry);
				UNLOCK_GC;
			}
		}
		binary_protocol_dislink_update_staged (link, obj, track, index);
	}
#else
	/* Disabled reference implementation: always take the GC lock and
	 * process the entry synchronously, bypassing the staging buffer. */
	if (!in_gc)
		LOCK_GC;
	binary_protocol_dislink_update (link, obj, track, 0);
	process_dislink_stage_entry (obj, link, -1);
	if (!in_gc)
		UNLOCK_GC;
#endif
}
872
/*
 * One-time initialization for this module: register the heavy-statistics
 * counters for the staging machinery.  The stat_* variables are defined
 * elsewhere in this file, alongside the stage-entry helpers.  A no-op
 * unless built with HEAVY_STATISTICS.
 */
void
sgen_init_fin_weak_hash (void)
{
#ifdef HEAVY_STATISTICS
	mono_counters_register ("FinWeak Successes", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_success);
	mono_counters_register ("FinWeak Overflow aborts", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_overflow_abort);
	mono_counters_register ("FinWeak Wait for processing", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_wait_for_processing);
	mono_counters_register ("FinWeak Increment other thread", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_increment_other_thread);
	mono_counters_register ("FinWeak Index decremented", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_index_decremented);
	mono_counters_register ("FinWeak Entry invalidated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_entry_invalidated);
#endif
}
885
886 #endif /* HAVE_SGEN_GC */