HEAVY_STAT (++stat_wbarrier_object_copy);
- if (sgen_ptr_in_nursery (obj) || ptr_on_stack (obj) || !SGEN_OBJECT_HAS_REFERENCES (src)) {
+ SGEN_ASSERT (6, !ptr_on_stack (obj), "Why is this called for an object on the stack?");
+ if (sgen_ptr_in_nursery (obj) || !SGEN_OBJECT_HAS_REFERENCES (src)) {
size = mono_object_class (obj)->instance_size;
mono_gc_memmove_aligned ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
size - sizeof (MonoObject));
return sgen_gc_invoke_finalizers ();
}
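
For context on this hunk: the fast path may raw-copy the payload because a nursery destination will be scanned by the next minor collection anyway, and a source without reference fields has nothing for the collector to track; only the remaining case has to be recorded in the remembered set. A sketch of that decision (record_object_copy_remset is an invented name, not sgen's actual slow path):

    /* Sketch only; record_object_copy_remset () is hypothetical. */
    static void
    object_copy_barrier_sketch (MonoObject *dst, MonoObject *src)
    {
    	size_t payload = mono_object_class (dst)->instance_size - sizeof (MonoObject);

    	if (sgen_ptr_in_nursery (dst) || !SGEN_OBJECT_HAS_REFERENCES (src)) {
    		/* Nursery destinations get scanned on the next minor collection;
    		 * reference-free payloads cannot create old->young pointers. */
    		mono_gc_memmove_aligned ((char *)dst + sizeof (MonoObject),
    				(char *)src + sizeof (MonoObject), payload);
    		return;
    	}
    	/* Old-space destination with reference fields: the copy must be
    	 * visible to the collector before the next minor collection. */
    	record_object_copy_remset (dst, src, payload);
    }
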
-gboolean
+MonoBoolean
mono_gc_pending_finalizers (void)
{
return sgen_have_pending_finalizers ();
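
On the gboolean to MonoBoolean change: this function is surfaced to managed code, and the managed bool is one byte, while glib's gboolean is int-sized, so the narrower type matches the managed signature. A sketch of the distinction (typedefs simplified, not copied from the headers):

    /* Simplified; the real typedefs live in eglib and the mono headers. */
    typedef int gboolean;              /* int-sized, for runtime-internal flags */
    typedef unsigned char MonoBoolean; /* one byte, matching the managed bool   */

    /* An icall whose managed signature returns bool should therefore be
     * declared as returning MonoBoolean, as mono_gc_pending_finalizers now is. */
    MonoBoolean mono_gc_pending_finalizers (void);
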
- * @out_array: output array
- * @out_size: size of output array
*
- * Store inside @out_array up to @out_size objects that belong to the unloading
- * appdomain @domain. Returns the number of stored items. Can be called repeteadly
- * until it returns 0.
- * The items are removed from the finalizer data structure, so the caller is supposed
- * to finalize them.
- * @out_array should be on the stack to allow the GC to know the objects are still alive.
+ * Enqueue for finalization all objects that belong to the unloading appdomain @domain.
+ * The enqueuing can be terminated early by calling mono_gc_suspend_finalizers ().
*/
-int
-mono_gc_finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size)
+void
+mono_gc_finalize_domain (MonoDomain *domain)
+{
+ sgen_finalize_if (object_in_domain_predicate, domain);
+}
+
+void
+mono_gc_suspend_finalizers (void)
{
- return sgen_gather_finalizers_if (object_in_domain_predicate, domain, out_array, out_size);
+ sgen_set_suspend_finalizers ();
}
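
A rough sketch of how an unload path might drive the two new entry points; the driver below is hypothetical, only mono_gc_finalize_domain and mono_gc_suspend_finalizers come from this patch:

    /* Hypothetical caller, for illustration only. */
    static void
    unload_domain_sketch (MonoDomain *domain, gboolean runtime_shutting_down)
    {
    	if (runtime_shutting_down) {
    		/* No point finalizing anything anymore: make the finalizer
    		 * machinery bail out early. */
    		mono_gc_suspend_finalizers ();
    		return;
    	}
    	/* Enqueue every finalizable object of the dying domain once; the
    	 * finalizer thread drains the queue. The old API had to be called
    	 * repeatedly, with a stack-resident out_array, until it returned 0. */
    	mono_gc_finalize_domain (domain);
    }
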
/*
sgen_stop_world (0);
if (sgen_concurrent_collection_in_progress ())
- sgen_perform_collection (0, GENERATION_OLD, "clear domain", TRUE);
+ sgen_perform_collection (0, GENERATION_OLD, "clear domain", TRUE, FALSE);
SGEN_ASSERT (0, !sgen_concurrent_collection_in_progress (), "We just ordered a synchronous collection. Why are we collecting concurrently?");
major_collector.finish_sweeping ();
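
The extra trailing argument suggests sgen_perform_collection now takes an explicit stop-the-world flag; FALSE is correct here because sgen_stop_world (0) was already called above, so the collection must not try to stop the world a second time. The assumed post-patch declaration (parameter names guessed, not taken from this patch):

    /* Assumed declaration; parameter names are a guess. */
    void sgen_perform_collection (size_t requested_size, int generation_to_collect,
    		const char *reason, gboolean wait_to_finish, gboolean stw);
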
#ifdef MANAGED_ALLOCATION
-#ifdef HAVE_KW_THREAD
-
-#define EMIT_TLS_ACCESS_VAR(_mb, _var) /* nothing to do */
-
-#define EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR(mb, _var) \
- do { \
- mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
- mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
- mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_IN_CRITICAL_REGION_ADDR); \
- } while (0)
-
-#define EMIT_TLS_ACCESS_NEXT_ADDR(mb, _var) do { \
- mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
- mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
- mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_TLAB_NEXT_ADDR); \
- } while (0)
-
-#define EMIT_TLS_ACCESS_TEMP_END(mb, _var) do { \
- mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
- mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
- mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_TLAB_TEMP_END); \
- } while (0)
-
-#else
-
-#if defined(TARGET_OSX) || defined(TARGET_WIN32) || defined(TARGET_ANDROID) || defined(TARGET_IOS)
+#if defined(HAVE_KW_THREAD) || defined(TARGET_OSX) || defined(TARGET_WIN32) || defined(TARGET_ANDROID) || defined(TARGET_IOS)
// Cache the SgenThreadInfo pointer in a local 'var'.
#define EMIT_TLS_ACCESS_VAR(mb, var) \
	do { \
		var = mono_mb_add_local ((mb), &mono_defaults.int_class->byval_arg); \
		mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
		mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
		mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_THREAD_INFO); \
		mono_mb_emit_stloc ((mb), (var)); \
	} while (0)
#define EMIT_TLS_ACCESS_NEXT_ADDR(mb, var) do { \
mono_mb_emit_ldloc ((mb), (var)); \
- mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenThreadInfo, tlab_next_addr)); \
+ mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenThreadInfo, tlab_next)); \
mono_mb_emit_byte ((mb), CEE_ADD); \
- mono_mb_emit_byte ((mb), CEE_LDIND_I); \
} while (0)
#define EMIT_TLS_ACCESS_TEMP_END(mb, var) do { \
	mono_mb_emit_ldloc ((mb), (var)); \
	mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenThreadInfo, tlab_temp_end)); \
	mono_mb_emit_byte ((mb), CEE_ADD); \
	mono_mb_emit_byte ((mb), CEE_LDIND_I); \
	} while (0)
#define EMIT_TLS_ACCESS_NEXT_ADDR(mb, _var) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
#define EMIT_TLS_ACCESS_TEMP_END(mb, _var) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
#define EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR(mb, _var) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
-#endif
#endif
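
Conceptually, the tlab_next_addr to tlab_next change (together with the dropped CEE_LDIND_I) turns an indirect load into a plain field-address computation against the cached SgenThreadInfo. In C terms, roughly (sketch type only; the real struct has many more fields):

    /* Sketch type only. */
    typedef struct {
    	char **tlab_next_addr;  /* old scheme: separately stored pointer to the slot */
    	char *tlab_next;        /* new scheme: the slot itself                       */
    } SgenThreadInfoSketch;

    static char **
    tlab_next_slot_old (SgenThreadInfoSketch *info)
    {
    	return info->tlab_next_addr;  /* field load + indirection (CEE_LDIND_I) */
    }

    static char **
    tlab_next_slot_new (SgenThreadInfoSketch *info)
    {
    	return &info->tlab_next;      /* just base + offset (CEE_ADD), no load */
    }

One dependent load fewer on the allocation fast path.
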
goto done;
}
+ /*
+ * TLS access might call foreign code or code without jinfo. That is only
+ * safe outside of the critical region, so emit the TLS access before
+ * entering it.
+ */
EMIT_TLS_ACCESS_VAR (mb, thread_var);
-#ifdef MANAGED_ALLOCATOR_CAN_USE_CRITICAL_REGION
- EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR (mb, thread_var);
- mono_mb_emit_byte (mb, CEE_LDC_I4_1);
- mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
- mono_mb_emit_byte (mb, CEE_MONO_ATOMIC_STORE_I4);
- mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_NONE);
-#endif
-
size_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
if (atype == ATYPE_SMALL) {
/* size_var = size_arg */
g_assert_not_reached ();
}
+#ifdef MANAGED_ALLOCATOR_CAN_USE_CRITICAL_REGION
+ EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR (mb, thread_var);
+ mono_mb_emit_byte (mb, CEE_LDC_I4_1);
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_ATOMIC_STORE_I4);
+ mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_NONE);
+#endif
+
/* size += ALLOC_ALIGN - 1; */
mono_mb_emit_ldloc (mb, size_var);
mono_mb_emit_icon (mb, SGEN_ALLOC_ALIGN - 1);
mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
+ /*
+ * Leave the critical region before calling into unmanaged code: unmanaged
+ * code might take locks, and staying in the critical region across such a
+ * call could deadlock a stop-the-world.
+ */
+#ifdef MANAGED_ALLOCATOR_CAN_USE_CRITICAL_REGION
+ EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR (mb, thread_var);
+ mono_mb_emit_byte (mb, CEE_LDC_I4_0);
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_ATOMIC_STORE_I4);
+ mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_NONE);
+#endif
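
Taken together, the two MANAGED_ALLOCATOR_CAN_USE_CRITICAL_REGION blocks bracket the TLAB fast path with a per-thread flag that the suspend machinery can inspect. A minimal sketch of the protocol the emitted IL follows (flag placement and helper names invented; sgen stores the real flag in the thread info via CEE_MONO_ATOMIC_STORE_I4, as emitted above):

    #include <stddef.h>

    /* Illustrative only. */
    static __thread volatile int in_critical_region;

    extern void *try_tlab_bump_alloc (size_t size); /* hypothetical fast path */
    extern void *alloc_slow_path (size_t size);     /* hypothetical slow path */

    static void *
    alloc_sketch (size_t size)
    {
    	void *p;

    	in_critical_region = 1;  /* enter: a suspension here must restart us */
    	p = try_tlab_bump_alloc (size);
    	/* Leave before any call that might take locks, or a stop-the-world
    	 * initiated by another thread could deadlock against us. */
    	in_critical_region = 0;
    	if (!p)
    		p = alloc_slow_path (size);
    	return p;
    }
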
/* FIXME: mono_gc_alloc_obj takes a 'size_t' as an argument, not an int32 */
mono_mb_emit_ldarg (mb, 0);
MonoMethod *res;
MonoMethod **cache;
- if (!use_managed_allocator)
+ if (variant == MANAGED_ALLOCATOR_REGULAR && !use_managed_allocator)
return NULL;
- if (!mono_runtime_has_tls_get ())
+ if (variant == MANAGED_ALLOCATOR_REGULAR && !mono_runtime_has_tls_get ())
return NULL;
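
The two early-outs are now gated on the variant: only the regular fast-path allocator depends on the user opt-out and on a working TLS fast path, so other variants keep being generated. A sketch of the assumed variant enum (MANAGED_ALLOCATOR_REGULAR appears in the checks above; the second value is a guess):

    /* Assumed shape of the variant enum. */
    typedef enum {
    	MANAGED_ALLOCATOR_REGULAR,   /* TLAB fast path: needs TLS, may be disabled */
    	MANAGED_ALLOCATOR_SLOW_PATH  /* always calls into the runtime              */
    } ManagedAllocatorVariant;
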
switch (variant) {
g_assert (info->client_info.stack_end);
aligned_stack_start = (void*)(mword) ALIGN_TO ((mword)info->client_info.stack_start, SIZEOF_VOID_P);
+#ifdef HOST_WIN32
+ /* Windows uses a guard page before the committed stack memory pages to detect when the
+ stack needs to be grown. If we suspend a thread just after a function prolog has
+ decremented the stack pointer to point into the guard page but before the thread has
+ been able to read or write to that page, starting the stack scan at aligned_stack_start
+ will raise a STATUS_GUARD_PAGE_VIOLATION and the process will crash. This code uses
+ VirtualQuery() to determine whether stack_start points into the guard page and then
+ updates aligned_stack_start to point at the next non-guard page. */
+ MEMORY_BASIC_INFORMATION mem_info;
+ SIZE_T result = VirtualQuery (info->client_info.stack_start, &mem_info, sizeof (mem_info));
+ g_assert (result != 0);
+ if (mem_info.Protect & PAGE_GUARD) {
+ aligned_stack_start = ((char*) mem_info.BaseAddress) + mem_info.RegionSize;
+ }
+#endif
g_assert (info->client_info.suspend_done);
SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %zd, pinned=%zd", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start, sgen_get_pinned_count ());
char*
mono_gc_get_description (void)
{
+#ifdef HAVE_CONC_GC_AS_DEFAULT
+ return g_strdup ("sgen (concurrent by default)");
+#else
return g_strdup ("sgen");
+#endif
}
void
} else if (g_str_has_prefix (opt, "toggleref-test")) {
/* FIXME: This should probably be in MONO_GC_DEBUG */
sgen_register_test_toggleref_callback ();
- } else {
+ } else if (!sgen_bridge_handle_gc_param (opt)) {
return FALSE;
}
return TRUE;
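
The new else-if chains unrecognized options into the bridge code, so bridge options can live in the same MONO_GC_PARAMS list; parsing only fails when the bridge rejects the option too. A sketch of the handler shape this implies (the option name is a guess; the real handler lives in the bridge code):

    #include <glib.h>

    /* Hypothetical shape; actual option names may differ. */
    gboolean
    sgen_bridge_handle_gc_param (const char *opt)
    {
    	if (g_str_has_prefix (opt, "bridge-implementation=")) {
    		/* select a bridge implementation ... */
    		return TRUE;    /* consumed */
    	}
    	return FALSE;       /* not a bridge option: caller reports the error */
    }
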
#if defined(HAVE_KW_THREAD)
/* This can happen with using libmonosgen.so */
- if (mono_tls_key_get_offset (TLS_KEY_SGEN_TLAB_NEXT_ADDR) == -1)
+ if (mono_tls_key_get_offset (TLS_KEY_SGEN_THREAD_INFO) == -1)
sgen_set_use_managed_allocator (FALSE);
#endif