HEAVY_STAT (++stat_wbarrier_object_copy);
- if (sgen_ptr_in_nursery (obj) || ptr_on_stack (obj) || !SGEN_OBJECT_HAS_REFERENCES (src)) {
+ SGEN_ASSERT (6, !ptr_on_stack (obj), "Why is this called for an object on the stack?");
+ if (sgen_ptr_in_nursery (obj) || !SGEN_OBJECT_HAS_REFERENCES (src)) {
size = mono_object_class (obj)->instance_size;
mono_gc_memmove_aligned ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
size - sizeof (MonoObject));
* @out_array: output array
* @out_size: size of output array
*
- * Store inside @out_array up to @out_size objects that belong to the unloading
- * appdomain @domain. Returns the number of stored items. Can be called repeteadly
- * until it returns 0.
- * The items are removed from the finalizer data structure, so the caller is supposed
- * to finalize them.
- * @out_array should be on the stack to allow the GC to know the objects are still alive.
+ * Enqueue for finalization all objects that belong to the unloading appdomain @domain.
+ * mono_gc_suspend_finalizers () can be used for early termination of the enqueuing process.
*/
-int
-mono_gc_finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size)
+void
+mono_gc_finalize_domain (MonoDomain *domain)
{
- return sgen_gather_finalizers_if (object_in_domain_predicate, domain, out_array, out_size);
+ sgen_finalize_if (object_in_domain_predicate, domain);
+}
+
+void
+mono_gc_suspend_finalizers (void)
+{
+ sgen_set_suspend_finalizers ();
}
/*
sgen_stop_world (0);
if (sgen_concurrent_collection_in_progress ())
- sgen_perform_collection (0, GENERATION_OLD, "clear domain", TRUE);
+ sgen_perform_collection (0, GENERATION_OLD, "clear domain", TRUE, FALSE);
SGEN_ASSERT (0, !sgen_concurrent_collection_in_progress (), "We just ordered a synchronous collection. Why are we collecting concurrently?");
major_collector.finish_sweeping ();
EMIT_TLS_ACCESS_VAR (mb, thread_var);
-#ifdef MANAGED_ALLOCATOR_CAN_USE_CRITICAL_REGION
- EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR (mb, thread_var);
- mono_mb_emit_byte (mb, CEE_LDC_I4_1);
- mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
- mono_mb_emit_byte (mb, CEE_MONO_ATOMIC_STORE_I4);
- mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_NONE);
-#endif
-
size_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
if (atype == ATYPE_SMALL) {
/* size_var = size_arg */
g_assert_not_reached ();
}
+#ifdef MANAGED_ALLOCATOR_CAN_USE_CRITICAL_REGION
+ EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR (mb, thread_var);
+ mono_mb_emit_byte (mb, CEE_LDC_I4_1);
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_ATOMIC_STORE_I4);
+ mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_NONE);
+#endif
+
/* size += ALLOC_ALIGN - 1; */
mono_mb_emit_ldloc (mb, size_var);
mono_mb_emit_icon (mb, SGEN_ALLOC_ALIGN - 1);
mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
+ /*
+ * Exit the critical section before calling into unmanaged code: unmanaged
+ * code might take locks, and staying in the critical region across such a
+ * call could deadlock against a stop-the-world suspend.
+ */
+#ifdef MANAGED_ALLOCATOR_CAN_USE_CRITICAL_REGION
+ EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR (mb, thread_var);
+ mono_mb_emit_byte (mb, CEE_LDC_I4_0);
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_ATOMIC_STORE_I4);
+ mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_NONE);
+#endif
/* FIXME: mono_gc_alloc_obj takes a 'size_t' as an argument, not an int32 */
mono_mb_emit_ldarg (mb, 0);
char*
mono_gc_get_description (void)
{
+#ifdef HAVE_CONC_GC_AS_DEFAULT
+ return g_strdup ("sgen (concurrent by default)");
+#else
return g_strdup ("sgen");
+#endif
}
void