AC_ARG_ENABLE(executables, [ --disable-executables disable the build of the runtime executables], enable_executables=$enableval, enable_executables=yes)
AM_CONDITIONAL(DISABLE_EXECUTABLES, test x$enable_executables = xno)
+AC_ARG_ENABLE(extension-module, [ --enable-extension-module enables usage of the extension module], has_extension_module=$enableval, has_extension_module=no)
+AM_CONDITIONAL([HAS_EXTENSION_MODULE], [test x$has_extension_module != xno])
+
+if test x$has_extension_module != xno ; then
+ AC_DEFINE([ENABLE_EXTENSION_MODULE], 1, [Extension module enabled])
+ AC_MSG_NOTICE([Enabling mono extension module.])
+fi
+
+
AC_MSG_CHECKING(for visibility __attribute__)
AC_COMPILE_IFELSE([
AC_LANG_SOURCE([[
echo "MOONLIGHT = 1" >> $srcdir/$mcsdir/build/config.make
fi
+ if test x$has_extension_module != xno; then
+ echo "EXTENSION_MODULE = 1" >> $srcdir/$mcsdir/build/config.make
+ fi
+
default_profile=net_2_0
if test -z "$INSTALL_4_0_TRUE"; then :
default_profile=net_4_0
mono.pc.in mono-2.pc.in monosgen-2.pc.in mint.pc.in dotnet.pc.in dotnet35.pc.in wcf.pc.in monodoc.pc.in \
mono-nunit.pc.in mono-cairo.pc.in mono-options.pc.in cecil.pc.in \
mono-lineeditor.pc.in system.web.extensions_1.0.pc.in system.web.extensions.design_1.0.pc.in\
- dtrace-prelink.sh mono.web.pc.in system.web.mvc.pc.in system.web.mvc2.pc.in system.web.mvc3.pc.in aspnetwebstack.pc.in \
+ dtrace-prelink.sh mono.web.pc.in system.web.mvc.pc.in system.web.mvc2.pc.in system.web.mvc3.pc.in aspnetwebstack.pc.in reactive.pc.in \
net_1_1/machine.config \
gdb/mono-gdb.py \
gdb/gdb-python.diff
if JIT_SUPPORTED
if INTERP_SUPPORTED
pkgconfig_DATA= mono.pc mono-2.pc mint.pc dotnet.pc dotnet35.pc wcf.pc mono-nunit.pc mono-cairo.pc mono-options.pc cecil.pc monodoc.pc mono-lineeditor.pc system.web.extensions_1.0.pc \
- system.web.extensions.design_1.0.pc mono.web.pc system.web.mvc.pc system.web.mvc2.pc system.web.mvc3.pc aspnetwebstack.pc $(SGENPCFILE)
+ system.web.extensions.design_1.0.pc mono.web.pc system.web.mvc.pc system.web.mvc2.pc system.web.mvc3.pc aspnetwebstack.pc reactive.pc $(SGENPCFILE)
else
pkgconfig_DATA= mono.pc mono-2.pc dotnet.pc dotnet35.pc wcf.pc mono-nunit.pc mono-cairo.pc mono-options.pc cecil.pc monodoc.pc mono-lineeditor.pc system.web.extensions_1.0.pc \
- system.web.extensions.design_1.0.pc mono.web.pc system.web.mvc.pc system.web.mvc2.pc system.web.mvc3.pc aspnetwebstack.pc $(SGENPCFILE)
+ system.web.extensions.design_1.0.pc mono.web.pc system.web.mvc.pc system.web.mvc2.pc system.web.mvc3.pc aspnetwebstack.pc reactive.pc $(SGENPCFILE)
endif
else
pkgconfig_DATA= mint.pc mono-nunit.pc mono-cairo.pc mono-options.pc cecil.pc monodoc.pc mono-lineeditor.pc
endif
DISTCLEANFILES= mono-2.pc mono.pc mint.pc dotnet.pc dotnet35.pc wcf.pc mono-nunit.pc mono-cairo.pc mono-options.pc cecil.pc monodoc.pc mono-lineeditor.pc system.web.extensions_1.0.pc \
- system.web.extensions.design_1.0.pc mono.web.pc system.web.mvc.pc system.web.mvc2.pc system.web.mvc3.pc aspnetwebstack.pc $(SGENPCFILE) mono-sgen-gdb.py
+ system.web.extensions.design_1.0.pc mono.web.pc system.web.mvc.pc system.web.mvc2.pc system.web.mvc3.pc aspnetwebstack.pc reactive.pc $(SGENPCFILE) mono-sgen-gdb.py
mono_DATA = config \
browscap.ini
probe gc__begin (int generation);
probe gc__end (int generation);
+ probe gc__concurrent__start__begin (int generation);
+ probe gc__concurrent__start__end (int generation, long long num_major_objects_marked);
+ probe gc__concurrent__update__finish__begin (int generation, long long num_major_objects_marked);
+ probe gc__concurrent__update__end (int generation, long long num_major_objects_marked);
+ probe gc__concurrent__finish__end (int generation, long long num_major_objects_marked);
+
+ probe gc__sweep__begin (int generation, int full_sweep);
+ probe gc__sweep__end (int generation, int full_sweep);
+
+ probe gc__world__stop__begin ();
+ probe gc__world__stop__end ();
+ probe gc__world__restart__begin (int generation);
+ probe gc__world__restart__end (int generation);
+
probe gc__heap__alloc (uintptr_t addr, uintptr_t len);
probe gc__heap__free (uintptr_t addr, uintptr_t len);
probe gc__major__swept (uintptr_t addr, uintptr_t len);
probe gc__obj__pinned (uintptr_t addr, uintptr_t size, char *ns_name, char *class_name, int generation);
+
+ probe gc__finalize__enqueue (uintptr_t addr, uintptr_t size, char *ns_name, char *class_name, int generation, int is_critical);
+ probe gc__finalize__invoke (uintptr_t addr, uintptr_t size, char *ns_name, char *class_name);
+
+ probe gc__weak__update (uintptr_t ref_addr, uintptr_t old_addr, uintptr_t new_addr, uintptr_t size, char *ns_name, char *class_name, int track);
};
#pragma D attributes Evolving/Evolving/Common provider mono provider
--- /dev/null
+Name: Reactive Extensions
+Description: Reactive Extensions (Rx) libraries for the Mono framework
+Version: @VERSION@
+Libs:
+-r:@prefix@/lib/mono/4.5/System.Reactive.Interfaces.dll
+-r:@prefix@/lib/mono/4.5/System.Reactive.Core.dll
+-r:@prefix@/lib/mono/4.5/System.Reactive.Linq.dll
+-r:@prefix@/lib/mono/4.5/System.Reactive.PlatformServices.dll
+-r:@prefix@/lib/mono/4.5/System.Reactive.Providers.dll
+-r:@prefix@/lib/mono/4.5/System.Reactive.Debugger.dll
+-r:@prefix@/lib/mono/4.5/System.Reactive.Experimental.dll
+-r:@prefix@/lib/mono/4.5/System.Reactive.Runtime.Remoting.dll
.TP
\fBmajor=\fIcollector\fR
Specifies which major collector to use. Options are `marksweep' for
-the Mark&Sweep collector, `marksweep-par' for parallel Mark&Sweep,
-`marksweep-fixed' for Mark&Sweep with a fixed heap,
-`marksweep-fixed-par' for parallel Mark&Sweep with a fixed heap and
-`copying' for the copying collector. The Mark&Sweep collector is the
-default.
+the Mark&Sweep collector, `marksweep-conc' for concurrent Mark&Sweep,
+`marksweep-par' for parallel Mark&Sweep, `marksweep-fixed' for
+Mark&Sweep with a fixed heap, `marksweep-fixed-par' for parallel
+Mark&Sweep with a fixed heap and `copying' for the copying
+collector. The Mark&Sweep collector is the default.
.TP
\fBmajor-heap-size=\fIsize\fR
Sets the size of the major heap (not including the large object space)
type in the next major collection, thereby restoring occupancy to close
to 100 percent. A value of 0 turns evacuation off.
.TP
-\fB(no-)concurrent-sweep\fR
-Enables or disables concurrent sweep for the Mark&Sweep collector. If
-enabled, the sweep phase of the garbage collection is done in a thread
-concurrently with the application. Concurrent sweep is disabled by
-default.
+\fB(no-)lazy-sweep\fR
+Enables or disables lazy sweep for the Mark&Sweep collector. If
+enabled, the sweep phase of the garbage collection is done piecemeal
+whenever the need arises, typically during nursery collections. Lazy
+sweeping is enabled by default.
.TP
\fBstack-mark=\fImark-mode\fR
Specifies how application threads should be scanned. Options are
outfile=$1
incfile=$2
excfile=$3
+extfile=$4
process_includes_1() {
sed -e '/^[ \t]*$/d' -e '/^[ \t]*#/d' $1 > $2
process_includes $incfile $outfile.inc
+if test x$extfile != x -a -f $extfile; then
+ cat $extfile >> $outfile.inc
+fi
+
sort -u $outfile.inc > $outfile.inc_s
rm -f $outfile.inc
sourcefile = $(depsdir)/$(PROFILE)_$(LIBRARY).sources
library_CLEAN_FILES += $(sourcefile)
+ifdef EXTENSION_MODULE
+EXTENSION_include=$(topdir)/../../mono-extensions/mcs/$(thisdir)/$(PROFILE)_$(LIBRARY).sources
+endif
+
# Note, gensources.sh can create a $(sourcefile).makefrag if it sees any '#include's
# We don't include it in the dependencies since it isn't always created
$(sourcefile): $(PROFILE_sources) $(PROFILE_excludes) $(topdir)/build/gensources.sh
@echo Creating the per profile list $@ ...
- $(SHELL) $(topdir)/build/gensources.sh $@ $(PROFILE_sources) $(PROFILE_excludes)
+ $(SHELL) $(topdir)/build/gensources.sh $@ '$(PROFILE_sources)' '$(PROFILE_excludes)' '$(EXTENSION_include)'
endif
PLATFORM_excludes := $(wildcard $(LIBRARY).$(PLATFORM)-excludes)
System.Windows.Forms.DataVisualization \
System.Xaml \
WindowsBase \
+ System.ServiceModel.Routing \
+ System.ServiceModel.Discovery \
+ System.Runtime.Caching \
+ System.Runtime.DurableInstancing \
+ Mono.Parallel \
System.Reactive.Interfaces \
System.Reactive.Core \
System.Reactive.Linq \
System.Reactive.Windows.Threading \
System.Reactive.Experimental \
System.Reactive.Debugger \
- System.ServiceModel.Routing \
- System.ServiceModel.Discovery \
- System.Runtime.Caching \
- System.Runtime.DurableInstancing \
- Mono.Parallel \
Microsoft.Web.Infrastructure \
WebMatrix.Data \
System.Data.Services.Client \
EXTRA_DISTFILES = more_build_args
-VALID_PROFILE := $(filter 2 4, $(FRAMEWORK_VERSION_MAJOR))
+VALID_PROFILE := $(filter net_4_0 net_4_5, $(PROFILE))
ifndef VALID_PROFILE
LIBRARY_NAME = dummy-System.Mono.Reactive.Testing.dll
-NO_INSTALL = yes
NO_TEST = yes
endif
+NO_INSTALL = yes
NO_SIGN_ASSEMBLY = yes
include ../../build/library.make
}
- public static DateTimeOffset ConvertTime (DateTimeOffset dateTimeOffset, TimeZoneInfo destinationTimeZone)
+ public static DateTimeOffset ConvertTime(DateTimeOffset dateTimeOffset, TimeZoneInfo destinationTimeZone)
{
- throw new NotImplementedException ();
+ if (destinationTimeZone == null)
+ throw new ArgumentNullException("destinationTimeZone");
+
+ var utcDateTime = dateTimeOffset.UtcDateTime;
+ AdjustmentRule rule = destinationTimeZone.GetApplicableRule (utcDateTime);
+
+ if (rule != null && destinationTimeZone.IsDaylightSavingTime(utcDateTime)) {
+ var offset = destinationTimeZone.BaseUtcOffset + rule.DaylightDelta;
+ return new DateTimeOffset(DateTime.SpecifyKind(utcDateTime, DateTimeKind.Unspecified) + offset, offset);
+ }
+ else {
+ return new DateTimeOffset(DateTime.SpecifyKind(utcDateTime, DateTimeKind.Unspecified) + destinationTimeZone.BaseUtcOffset, destinationTimeZone.BaseUtcOffset);
+ }
}
public static DateTime ConvertTimeBySystemTimeZoneId (DateTime dateTime, string destinationTimeZoneId)
EXTRA_DISTFILES = more_build_args $(RESX_RESOURCES:.resources=.resx) $(PREBUILT)
-VALID_PROFILE := $(filter 2 4, $(FRAMEWORK_VERSION_MAJOR))
+VALID_PROFILE := $(filter monotouch monodroid net_4_0 net_4_5, $(PROFILE))
ifndef VALID_PROFILE
LIBRARY_NAME = dummy-System.System.Reactive.Core.dll
NO_SIGN_ASSEMBLY = yes
endif
+INSTALL_PROFILE := $(filter net_4_5, $(PROFILE))
+ifndef INSTALL_PROFILE
NO_INSTALL = yes
+endif
+
NO_TEST = yes
include ../../build/library.make
EXTRA_DISTFILES = more_build_args
-VALID_PROFILE := $(filter 2 4, $(FRAMEWORK_VERSION_MAJOR))
+VALID_PROFILE := $(filter monotouch monodroid net_4_0 net_4_5, $(PROFILE))
ifndef VALID_PROFILE
LIBRARY_NAME = dummy-System.System.Reactive.Debugger.dll
NO_SIGN_ASSEMBLY = yes
endif
+INSTALL_PROFILE := $(filter net_4_5, $(PROFILE))
+ifndef INSTALL_PROFILE
NO_INSTALL = yes
+endif
+
NO_TEST = yes
include ../../build/library.make
EXTRA_DISTFILES = more_build_args
-VALID_PROFILE := $(filter 2 4, $(FRAMEWORK_VERSION_MAJOR))
+VALID_PROFILE := $(filter monotouch monodroid net_4_0 net_4_5, $(PROFILE))
ifndef VALID_PROFILE
LIBRARY_NAME = dummy-System.System.Reactive.Experimental.dll
NO_SIGN_ASSEMBLY = yes
endif
+INSTALL_PROFILE := $(filter net_4_5, $(PROFILE))
+ifndef INSTALL_PROFILE
NO_INSTALL = yes
+endif
+
NO_TEST = yes
include ../../build/library.make
EXTRA_DISTFILES = more_build_args
-VALID_PROFILE := $(filter 2 4, $(FRAMEWORK_VERSION_MAJOR))
+VALID_PROFILE := $(filter monotouch monodroid net_4_0 net_4_5, $(PROFILE))
ifndef VALID_PROFILE
LIBRARY_NAME = dummy-System.System.Reactive.Interfaces.dll
NO_SIGN_ASSEMBLY = yes
endif
+INSTALL_PROFILE := $(filter net_4_5, $(PROFILE))
+ifndef INSTALL_PROFILE
NO_INSTALL = yes
+endif
+
NO_TEST = yes
include ../../build/library.make
EXTRA_DISTFILES = more_build_args $(RESX_RESOURCES:.resources=.resx) $(PREBUILT)
-VALID_PROFILE := $(filter 2 4, $(FRAMEWORK_VERSION_MAJOR))
+VALID_PROFILE := $(filter monotouch monodroid net_4_0 net_4_5, $(PROFILE))
ifndef VALID_PROFILE
LIBRARY_NAME = dummy-System.System.Reactive.Linq.dll
NO_SIGN_ASSEMBLY = yes
endif
+INSTALL_PROFILE := $(filter net_4_5, $(PROFILE))
+ifndef INSTALL_PROFILE
NO_INSTALL = yes
+endif
+
NO_TEST = yes
include ../../build/library.make
EXTRA_DISTFILES = more_build_args $(RESX_RESOURCES:.resources=.resx) $(PREBUILT)
-VALID_PROFILE := $(filter 2 4, $(FRAMEWORK_VERSION_MAJOR))
+VALID_PROFILE := $(filter monotouch monodroid net_4_0 net_4_5, $(PROFILE))
ifndef VALID_PROFILE
LIBRARY_NAME = dummy-System.System.Reactive.PlatformServices.dll
NO_SIGN_ASSEMBLY = yes
endif
+INSTALL_PROFILE := $(filter net_4_5, $(PROFILE))
+ifndef INSTALL_PROFILE
NO_INSTALL = yes
+endif
+
NO_TEST = yes
include ../../build/library.make
EXTRA_DISTFILES = more_build_args $(RESX_RESOURCES:.resources=.resx) $(PREBUILT)
-VALID_PROFILE := $(filter 2 4, $(FRAMEWORK_VERSION_MAJOR))
+VALID_PROFILE := $(filter monodroid net_4_0 net_4_5, $(PROFILE))
ifndef VALID_PROFILE
LIBRARY_NAME = dummy-System.System.Reactive.Providers.dll
NO_SIGN_ASSEMBLY = yes
endif
+INSTALL_PROFILE := $(filter net_4_5, $(PROFILE))
+ifndef INSTALL_PROFILE
NO_INSTALL = yes
+endif
+
NO_TEST = yes
include ../../build/library.make
EXTRA_DISTFILES = more_build_args
-VALID_PROFILE := $(filter 2 4, $(FRAMEWORK_VERSION_MAJOR))
+VALID_PROFILE := $(filter net_4_0 net_4_5, $(PROFILE))
ifndef VALID_PROFILE
LIBRARY_NAME = dummy-System.System.Reactive.Runtime.Remoting.dll
NO_SIGN_ASSEMBLY = yes
endif
+INSTALL_PROFILE := $(filter net_4_5, $(PROFILE))
+ifndef INSTALL_PROFILE
NO_INSTALL = yes
+endif
+
NO_TEST = yes
include ../../build/library.make
EXTRA_DISTFILES = more_build_args
-VALID_PROFILE := $(filter 2 4, $(FRAMEWORK_VERSION_MAJOR))
+VALID_PROFILE := $(filter net_4_0 net_4_5, $(PROFILE))
ifndef VALID_PROFILE
LIBRARY_NAME = dummy-System.System.Reactive.Windows.Forms.dll
NO_SIGN_ASSEMBLY = yes
endif
+INSTALL_PROFILE := $(filter net_4_5, $(PROFILE))
+ifndef INSTALL_PROFILE
NO_INSTALL = yes
+endif
+
NO_TEST = yes
include ../../build/library.make
EXTRA_DISTFILES = more_build_args $(RESX_RESOURCES:.resources=.resx) $(PREBUILT)
-VALID_PROFILE := $(filter 2 4, $(FRAMEWORK_VERSION_MAJOR))
+VALID_PROFILE := $(filter net_4_0 net_4_5, $(PROFILE))
ifndef VALID_PROFILE
LIBRARY_NAME = dummy-System.System.Reactive.Windows.Threading.dll
NO_SIGN_ASSEMBLY = yes
endif
+INSTALL_PROFILE := $(filter net_4_5, $(PROFILE))
+ifndef INSTALL_PROFILE
NO_INSTALL = yes
+endif
+
NO_TEST = yes
include ../../build/library.make
private string service_name;
private string[] services_depended_on;
private ServiceStartMode start_type;
-
#if NET_2_0
private string description;
+#endif
+#if NET_4_0
+ private bool delayedAutoStart;
+#endif
+#if NET_4_0
+ [DefaultValue(false)]
+ [ServiceProcessDescription("Indicates that the service's start should be delayed after other automatically started services have started.")]
+ public bool DelayedAutoStart {
+ get {
+ return delayedAutoStart;
+ }
+ set {
+ delayedAutoStart = value;
+ }
+ }
+#endif
+
+#if NET_2_0
[ComVisible (false)]
[DefaultValue ("")]
[ServiceProcessDescription ("Indicates the service's description (a brief comment that explains the purpose of the service). ")]
object[] flatLists = null;
object[] flatListsChoices = null;
Fixup fixup = null;
- int ind = 0;
+ int ind = -1;
int maxInd;
if (readBySoapOrder) {
if (map.ElementMembers != null) maxInd = map.ElementMembers.Count;
- else maxInd = 0;
+ else maxInd = -1;
}
else
maxInd = int.MaxValue;
AddFixup (fixup);
}
- while (Reader.NodeType != System.Xml.XmlNodeType.EndElement && (ind < maxInd))
+ XmlTypeMapMember previousMember = null;
+ while (Reader.NodeType != System.Xml.XmlNodeType.EndElement && (ind < maxInd - 1))
{
if (Reader.NodeType == System.Xml.XmlNodeType.Element)
{
XmlTypeMapElementInfo info;
if (readBySoapOrder) {
- info = map.GetElement (ind++);
+ info = map.GetElement (Reader.LocalName, Reader.NamespaceURI);
}
else if (hasAnyReturnMember) {
info = (XmlTypeMapElementInfo) ((XmlTypeMapMemberElement)map.ReturnMember).ElementInfo[0];
}
else {
if (map.IsOrderDependentMap) {
- while ((info = map.GetElement (ind++)) != null)
- if (info.ElementName == Reader.LocalName && info.Namespace == Reader.NamespaceURI)
- break;
+ info = map.GetElement (Reader.LocalName, Reader.NamespaceURI);
}
else
info = map.GetElement (Reader.LocalName, Reader.NamespaceURI, -1);
if (info != null && !readFlag[info.Member.Index] )
{
+ if (info.Member != previousMember)
+ {
+ ind++;
+ previousMember = info.Member;
+ }
+
+ if (readBySoapOrder && info.ExplicitOrder != ind)
+ throw new InvalidOperationException(string.Format("Element '{0}' has wrong order in sequence (expected - {1}, actual - {2})", Reader.LocalName, info.ExplicitOrder, ind));
+
+
if (info.Member.GetType() == typeof (XmlTypeMapMemberList))
{
if (_format == SerializationFormat.Encoded && info.MultiReferenceType)
if (_elements == null) return null;
return (XmlTypeMapElementInfo)_elements [BuildKey (name,ns, order)];
}
+
+ public XmlTypeMapElementInfo GetElement(string name, string ns)
+ {
+ if (_elements == null) return null;
+
+ foreach (XmlTypeMapElementInfo info in _elements.Values)
+ if (info.ElementName == name && info.Namespace == ns)
+ return info;
+
+ return null;
+ }
public XmlTypeMapElementInfo GetElement (int index)
{
var pathPrefix = ass == "Tests.System.Reactive" ? "../../" : "../";
// tests are built under Mono.Reactive.Testing directory.
+
var sources =
monoass == "Tests.System.Reactive" ?
Path.Combine ("Mono.Reactive.Testing", "Mono.Reactive.Testing_test.dll.sources") :
Path.Combine (monoass, monoass + ".dll.sources");
+ var assdir = Path.Combine (monoass, "Assembly");
+ var assinfo = Path.Combine (monoass, "Assembly", "AssemblyInfo.cs");
+
+ if (monoass != "Tests.System.Reactive") {
+ if (!Directory.Exists (assdir))
+ Directory.CreateDirectory (assdir);
+ using (var tw = File.CreateText (assinfo)) {
+ tw.WriteLine ("// Due to InternalsVisibleTo issue we don't add versions so far...");
+ tw.WriteLine ("// [assembly:System.Reflection.AssemblyVersion (\"0.0.0.0\")]");
+ }
+ }
+
var doc = XDocument.Load (csproj);
var rootNS = doc.XPathSelectElement ("//*[local-name()='RootNamespace']").Value;
using (var tw = File.CreateText (sources)) {
+ //if (monoass != "Tests.System.Reactive")
+ // tw.WriteLine ("Assembly/AssemblyInfo.cs");
foreach (var path in doc.XPathSelectElements ("//*[local-name()='Compile']")
.Select (el => el.Attribute ("Include").Value)
.Select (s => s.Replace ("\\", "/")))
using System.Runtime.InteropServices;
using System.Security.Cryptography;
using System.Text;
+#if FULL_AOT_RUNTIME
+using Crimson.CommonCrypto;
+#endif
namespace System {
[StructLayout (LayoutKind.Sequential)]
[ComVisible (true)]
public struct Guid : IFormattable, IComparable, IComparable<Guid>, IEquatable<Guid> {
-#if MONOTOUCH
+#if FULL_AOT_RUNTIME
static Guid () {
if (MonoTouchAOTHelper.FalseFlag) {
var comparer = new System.Collections.Generic.GenericComparer <Guid> ();
return (char)((b<0xA)?('0' + b):('a' + b - 0xA));
}
- private static object _rngAccess = new object ();
#if !FULL_AOT_RUNTIME
+ private static object _rngAccess = new object ();
private static RandomNumberGenerator _rng;
private static RandomNumberGenerator _fastRng;
-#else
- private static object _fastRng;
#endif
// generated as per section 3.4 of the specification
public static Guid NewGuid ()
{
-#if !FULL_AOT_RUNTIME
byte[] b = new byte [16];
-
+#if !FULL_AOT_RUNTIME
// thread-safe access to the prng
lock (_rngAccess) {
if (_rng == null)
_rng.GetBytes (b);
}
#else
- byte[] b = FastNewGuidArray ();
+ Cryptor.GetRandom (b);
#endif
-
Guid res = new Guid (b);
// Mask in Variant 1-0 in Bit[7..6]
res._d = (byte) ((res._d & 0x3fu) | 0x80u);
return res;
}
+#if !FULL_AOT_RUNTIME
// used in ModuleBuilder so mcs doesn't need to invoke
// CryptoConfig for simple assemblies.
internal static byte[] FastNewGuidArray ()
// thread-safe access to the prng
lock (_rngAccess) {
// if known, use preferred RNG
-#if FULL_AOT_RUNTIME
- if (_fastRng == null)
- _fastRng = new RNGCryptoServiceProvider ();
- (_fastRng as RNGCryptoServiceProvider).GetBytes (guid);
-#else
if (_rng != null)
_fastRng = _rng;
// else use hardcoded default RNG (bypassing CryptoConfig)
if (_fastRng == null)
_fastRng = new RNGCryptoServiceProvider ();
_fastRng.GetBytes (guid);
-#endif
}
// Mask in Variant 1-0 in Bit[7..6]
return guid;
}
-
+#endif
public byte[] ToByteArray ()
{
byte[] res = new byte[16];
string layout = Path.Combine (basedir, "monodoc.xml");
doc.Load (layout);
- string osxExternalDir = "/Library/Frameworks/Mono.framework/External/monodoc";
- string[] osxExternalSources = Directory.Exists (osxExternalDir)
- ? Directory.GetFiles (osxExternalDir, "*.source")
- : new string[0];
-
- return LoadTree (basedir, doc,
- Directory.GetFiles (Path.Combine (basedir, "sources"), "*.source")
- .Concat (osxExternalSources));
+ string[] sourceDirs = new[]{
+ Path.Combine (basedir, "sources"),
+ "/Library/Frameworks/Mono.framework/External/monodoc",
+ Path.Combine (
+ Environment.GetFolderPath (Environment.SpecialFolder.LocalApplicationData),
+ "monodoc"),
+ };
+ var sources = new List<string> ();
+ foreach (var dir in sourceDirs) {
+ if (!Directory.Exists (dir))
+ continue;
+ sources.AddRange (Directory.GetFiles (dir, "*.source"));
+ }
+
+ return LoadTree (basedir, doc, sources);
}
// Compatibility shim w/ Mono 2.6
%_prefix/lib/mono/4.5/Npgsql.dll
%_prefix/lib/mono/gac/Npgsql
+%package -n mono-rx-core
+License: MIT License (or similar) ; Apache License 2.0
+Summary: Reactive Extensions for Mono core libraries
+Group: Development/Languages/Mono
+Requires: mono-core == %version-%release
+Provides: mono(System.Reactive.Interfaces) = 1.0.5000.0
+
+%description -n mono-rx-core
+The Mono Project is an open development initiative that is working to
+develop an open source, Unix version of the .NET development platform.
+Its objective is to enable Unix developers to build and deploy
+cross-platform .NET applications. The project will implement various
+technologies that have been submitted to the ECMA for standardization.
+
+Reactive Extensions for Mono, core packages, which don't depend on
+desktop-specific features.
+
+%files -n mono-rx-core
+%defattr(-, root, root)
+%_libdir/pkgconfig/reactive.pc
+%_prefix/lib/mono/4.5/System.Reactive.Core.dll
+%_prefix/lib/mono/4.5/System.Reactive.Debugger.dll
+%_prefix/lib/mono/4.5/System.Reactive.Experimental.dll
+%_prefix/lib/mono/4.5/System.Reactive.Interfaces.dll
+%_prefix/lib/mono/4.5/System.Reactive.Linq.dll
+%_prefix/lib/mono/4.5/System.Reactive.PlatformServices.dll
+%_prefix/lib/mono/4.5/System.Reactive.Providers.dll
+%_prefix/lib/mono/4.5/System.Reactive.Runtime.Remoting.dll
+%_prefix/lib/mono/gac/System.Reactive.Core
+%_prefix/lib/mono/gac/System.Reactive.Debugger
+%_prefix/lib/mono/gac/System.Reactive.Experimental
+%_prefix/lib/mono/gac/System.Reactive.Interfaces
+%_prefix/lib/mono/gac/System.Reactive.Linq
+%_prefix/lib/mono/gac/System.Reactive.PlatformServices
+%_prefix/lib/mono/gac/System.Reactive.Providers
+%_prefix/lib/mono/gac/System.Reactive.Runtime.Remoting
+
+%package -n mono-rx-desktop
+License: MIT License (or similar) ; Apache License 2.0
+Summary: Reactive Extensions for Mono desktop-specific libraries
+Group: Development/Languages/Mono
+Requires: mono-core == %version-%release
+Requires: mono-rx-core == %version-%release
+Provides: mono(System.Reactive.Windows.Forms) = 1.0.5000.0
+
+%description -n mono-rx-desktop
+The Mono Project is an open development initiative that is working to
+develop an open source, Unix version of the .NET development platform.
+Its objective is to enable Unix developers to build and deploy
+cross-platform .NET applications. The project will implement various
+technologies that have been submitted to the ECMA for standardization.
+
+Reactive Extensions for Mono, desktop-specific packages (winforms,
+windows threading).
+
+%files -n mono-rx-desktop
+%defattr(-, root, root)
+%_prefix/lib/mono/4.5/System.Reactive.Windows.Forms.dll
+%_prefix/lib/mono/4.5/System.Reactive.Windows.Threading.dll
+%_prefix/lib/mono/gac/System.Reactive.Windows.Forms
+%_prefix/lib/mono/gac/System.Reactive.Windows.Threading
+
%package -n mono-nunit
License: LGPL v2.1 only
Summary: NUnit Testing Framework
sgen-marksweep-fixed.c \
sgen-marksweep-par.c \
sgen-marksweep-fixed-par.c \
+ sgen-marksweep-conc.c \
sgen-major-copying.c \
sgen-los.c \
sgen-protocol.c \
return NULL;
}
+gboolean
+mono_gc_card_table_nursery_check (void)
+{
+ g_assert_not_reached ();
+ return TRUE;
+}
+
void*
mono_gc_get_nursery (int *shift_bits, size_t *size)
{
int mono_gc_get_los_limit (void) MONO_INTERNAL;
guint8* mono_gc_get_card_table (int *shift_bits, gpointer *card_mask) MONO_INTERNAL;
+gboolean mono_gc_card_table_nursery_check (void) MONO_INTERNAL;
void* mono_gc_get_nursery (int *shift_bits, size_t *size) MONO_INTERNAL;
#include <mono/utils/mono-semaphore.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/utils/mono-counters.h>
+#include <mono/utils/dtrace.h>
#ifndef HOST_WIN32
#include <pthread.h>
mono_runtime_class_init (o->vtable);
+ if (G_UNLIKELY (MONO_GC_FINALIZE_INVOKE_ENABLED ())) {
+ MONO_GC_FINALIZE_INVOKE ((unsigned long)o, mono_object_get_size (o),
+ o->vtable->klass->name_space, o->vtable->klass->name);
+ }
+
runtime_invoke (o, NULL, &exc, NULL);
if (exc)
}
handles->bitmap [slot] |= 1 << i;
slot = slot * 32 + i;
- handles->entries [slot] = obj;
+ handles->entries [slot] = NULL;
if (handles->type <= HANDLE_WEAK_TRACK) {
/*FIXME, what to use when obj == null?*/
handles->domain_ids [slot] = (obj ? mono_object_get_domain (obj) : mono_domain_get ())->domain_id;
if (obj)
mono_gc_weak_link_add (&(handles->entries [slot]), obj, track);
+ } else {
+ handles->entries [slot] = obj;
}
#ifndef DISABLE_PERFCOUNTERS
const char *
mono_metadata_string_heap (MonoImage *meta, guint32 index)
{
+ g_assert (index < meta->heap_strings.size);
g_return_val_if_fail (index < meta->heap_strings.size, "");
return meta->heap_strings.data + index;
}
const char *
mono_metadata_user_string (MonoImage *meta, guint32 index)
{
+ g_assert (index < meta->heap_us.size);
g_return_val_if_fail (index < meta->heap_us.size, "");
return meta->heap_us.data + index;
}
const char *
mono_metadata_blob_heap (MonoImage *meta, guint32 index)
{
+ g_assert (index < meta->heap_blob.size);
g_return_val_if_fail (index < meta->heap_blob.size, "");/*FIXME shouldn't we return NULL and check for index == 0?*/
return meta->heap_blob.data + index;
}
new->owner = id;
new->nest = 1;
+ new->data = NULL;
#ifndef DISABLE_PERFCOUNTERS
mono_perfcounters->gc_sync_blocks++;
return NULL;
}
+gboolean
+mono_gc_card_table_nursery_check (void)
+{
+ g_assert_not_reached ();
+ return TRUE;
+}
+
void*
mono_gc_get_nursery (int *shift_bits, size_t *size)
{
sgen_ensure_free_space (size);
} else {
if (sgen_need_major_collection (size))
- sgen_perform_collection (size, GENERATION_OLD, "mature allocation failure");
+ sgen_perform_collection (size, GENERATION_OLD, "mature allocation failure", TRUE);
}
if (collect_before_allocs) {
if (((current_alloc % collect_before_allocs) == 0) && nursery_section) {
- sgen_perform_collection (0, GENERATION_NURSERY, "collect-before-alloc-triggered");
+ sgen_perform_collection (0, GENERATION_NURSERY, "collect-before-alloc-triggered", TRUE);
if (!degraded_mode && sgen_can_alloc_size (size) && size <= SGEN_MAX_SMALL_OBJ_SIZE) {
// FIXME:
g_assert_not_reached ();
p = sgen_los_alloc_large_inner (vtable, size);
} else {
SGEN_ASSERT (9, vtable->klass->inited, "class %s:%s is not initialized", vtable->klass->name_space, vtable->klass->name);
- p = major_collector.alloc_small_pinned_obj (size, SGEN_VTABLE_HAS_REFERENCES (vtable));
+ p = major_collector.alloc_small_pinned_obj (vtable, size, SGEN_VTABLE_HAS_REFERENCES (vtable));
}
if (G_LIKELY (p)) {
SGEN_LOG (6, "Allocated pinned object %p, vtable: %p (%s), size: %zd", p, vtable, vtable->klass->name, size);
else
MONO_GC_MAJOR_OBJ_ALLOC_PINNED ((mword)p, size, vtable->klass->name_space, vtable->klass->name);
binary_protocol_alloc_pinned (p, vtable, size);
- mono_atomic_store_seq (p, vtable);
}
UNLOCK_GC;
return p;
size_t size = ALIGN_UP (vtable->klass->instance_size);
LOCK_GC;
res = alloc_degraded (vtable, size, TRUE);
- mono_atomic_store_seq (res, vtable);
UNLOCK_GC;
if (G_UNLIKELY (vtable->klass->has_finalize))
mono_object_register_finalizer ((MonoObject*)res);
* Author:
* Rodrigo Kumpera (rkumpera@novell.com)
*
- * SGen is licensed under the terms of the MIT X11 license
- *
* Copyright 2001-2003 Ximian, Inc
* Copyright 2003-2010 Novell, Inc.
* Copyright 2011 Xamarin Inc (http://www.xamarin.com)
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "config.h"
SGEN_TV_GETTIME (btv);
last_major_scan_time = SGEN_TV_ELAPSED (atv, btv);
major_card_scan_time += last_major_scan_time;
- sgen_los_scan_card_table (queue);
+ sgen_los_scan_card_table (FALSE, queue);
SGEN_TV_GETTIME (atv);
last_los_scan_time = SGEN_TV_ELAPSED (btv, atv);
los_card_scan_time += last_los_scan_time;
return sgen_cardtable;
}
+gboolean
+mono_gc_card_table_nursery_check (void)
+{
+ return !major_collector.is_concurrent;
+}
+
#if 0
static void
collect_faulted_cards (void)
*/
//#define SGEN_BINARY_PROTOCOL
+/*
+ * This enables checks whenever objects are enqueued in gray queues.
+ * Right now the only check done is that we never enqueue nursery
+ * pointers in the concurrent collector.
+ */
+//#define SGEN_CHECK_GRAY_OBJECT_ENQUEUE
+
+/*
+ * This keeps track of where a gray object queue section is and
+ * whether it is where it should be.
+ */
+//#define SGEN_CHECK_GRAY_OBJECT_SECTIONS
+
/*
* Define this and use the "xdomain-checks" MONO_GC_DEBUG option to
* have cross-domain checks in the write barrier.
*/
//#define XDOMAIN_CHECKS_IN_WBARRIER
+/*
+ * Define this to get number of objects marked information in the
+ * concurrent GC DTrace probes. Has a small performance impact, so
+ * it's disabled by default.
+ */
+//#define SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
+
#ifndef SGEN_BINARY_PROTOCOL
#ifndef HEAVY_STATISTICS
#define MANAGED_ALLOCATION
MonoVTable *vt = ((MonoObject*)obj)->vtable;
gboolean has_references = SGEN_VTABLE_HAS_REFERENCES (vt);
mword objsize = SGEN_ALIGN_UP (sgen_par_object_get_size (vt, (MonoObject*)obj));
- char *destination = COLLECTOR_SERIAL_ALLOC_FOR_PROMOTION (obj, objsize, has_references);
+ char *destination = COLLECTOR_SERIAL_ALLOC_FOR_PROMOTION (vt, obj, objsize, has_references);
if (G_UNLIKELY (!destination)) {
collector_pin_object (obj, queue);
return obj;
}
- *(MonoVTable**)destination = vt;
par_copy_object_no_checks (destination, vt, obj, objsize, has_references ? queue : NULL);
+ /* FIXME: mark mod union cards if necessary */
/* set the forwarding pointer */
SGEN_FORWARD_OBJECT (obj, destination);
mword desc;
int type;
char *start;
+ char *forwarded;
+ restart:
if (sgen_ptr_in_nursery (ptr)) {
printf ("Pointer inside nursery.\n");
} else {
if (object_is_pinned (ptr))
printf ("Object is pinned.\n");
- if (object_is_forwarded (ptr))
- printf ("Object is forwared.\n");
+ if ((forwarded = object_is_forwarded (ptr))) {
+ printf ("Object is forwarded to %p:\n", forwarded);
+ ptr = forwarded;
+ goto restart;
+ }
// FIXME: Handle pointers to the inside of objects
vtable = (MonoVTable*)LOAD_VTABLE (ptr);
#include "metadata/sgen-gc.h"
#include "metadata/sgen-gray.h"
+#include "utils/dtrace.h"
#define ptr_in_nursery sgen_ptr_in_nursery
/* LOCKING: requires that the GC lock is held */
void
-sgen_collect_bridge_objects (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, GrayQueue *queue)
+sgen_collect_bridge_objects (char *start, char *end, int generation, ScanCopyContext ctx)
{
+ CopyOrMarkObjectFunc copy_func = ctx.copy_func;
+ GrayQueue *queue = ctx.queue;
SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
MonoObject *object;
gpointer dummy;
/* LOCKING: requires that the GC lock is held */
void
-sgen_finalize_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, GrayQueue *queue)
+sgen_finalize_in_range (char *start, char *end, int generation, ScanCopyContext ctx)
{
+ CopyOrMarkObjectFunc copy_func = ctx.copy_func;
+ GrayQueue *queue = ctx.queue;
SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
MonoObject *object;
gpointer dummy;
/* LOCKING: requires that the GC lock is held */
void
-sgen_null_link_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, gboolean before_finalization, GrayQueue *queue)
+sgen_null_link_in_range (char *start, char *end, int generation, gboolean before_finalization, ScanCopyContext ctx)
{
+ CopyOrMarkObjectFunc copy_func = ctx.copy_func;
+ GrayQueue *queue = ctx.queue;
void **link;
gpointer dummy;
SgenHashTable *hash = get_dislink_hash_table (generation);
void
sgen_register_disappearing_link (MonoObject *obj, void **link, gboolean track, gboolean in_gc)
{
+ if (MONO_GC_WEAK_UPDATE_ENABLED ()) {
+ MonoVTable *vt = obj ? (MonoVTable*)SGEN_LOAD_VTABLE (obj) : NULL;
+ MONO_GC_WEAK_UPDATE ((mword)link,
+ *link ? (mword)DISLINK_OBJECT (link) : (mword)0,
+ (mword)obj,
+ obj ? (mword)sgen_safe_object_get_size (obj) : (mword)0,
+ obj ? vt->klass->name_space : NULL,
+ obj ? vt->klass->name : NULL,
+ track ? 1 : 0);
+ }
+
if (obj)
*link = HIDE_POINTER (obj, track);
else
static gboolean whole_heap_check_before_collection = FALSE;
/* If set, do a heap consistency check before each minor collection */
static gboolean consistency_check_at_minor_collection = FALSE;
+/* If set, do a few checks when the concurrent collector is used */
+static gboolean do_concurrent_checks = FALSE;
/* If set, check that there are no references to the domain left at domain unload */
static gboolean xdomain_checks = FALSE;
/* If not null, dump the heap after each collection into this file */
} Ephemeron;
int current_collection_generation = -1;
+volatile gboolean concurrent_collection_in_progress = FALSE;
/* objects that are ready to be finalized */
static FinalizeReadyEntry *fin_ready_list = NULL;
#define GC_ROOT_NUM 32
typedef struct {
- int count;
+ int count; /* must be the first field */
void *objects [GC_ROOT_NUM];
int root_types [GC_ROOT_NUM];
uintptr_t extra_info [GC_ROOT_NUM];
/* forward declarations */
static void scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue);
-static void scan_from_registered_roots (CopyOrMarkObjectFunc copy_func, char *addr_start, char *addr_end, int root_type, GrayQueue *queue);
-static void scan_finalizer_entries (CopyOrMarkObjectFunc copy_func, FinalizeReadyEntry *list, GrayQueue *queue);
+static void scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx);
+static void scan_finalizer_entries (FinalizeReadyEntry *list, ScanCopyContext ctx);
static void report_finalizer_roots (void);
static void report_registered_roots (void);
static void pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue);
-static int pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, GrayQueue *queue);
+static int pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, ScanCopyContext ctx);
static void finish_gray_stack (char *start_addr, char *end_addr, int generation, GrayQueue *queue);
void mono_gc_scan_for_specific_ref (MonoObject *key, gboolean precise);
static void init_stats (void);
-static int mark_ephemerons_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue);
-static void clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue);
+static int mark_ephemerons_in_range (char *start, char *end, ScanCopyContext ctx);
+static void clear_unreachable_ephemerons (char *start, char *end, ScanCopyContext ctx);
static void null_ephemerons_for_domain (MonoDomain *domain);
SgenObjectOperations current_object_ops;
SgenMajorCollector major_collector;
SgenMinorCollector sgen_minor_collector;
static GrayQueue gray_queue;
+static GrayQueue remember_major_objects_gray_queue;
static SgenRemeberedSet remset;
+/* The gray queue to use from the main collection thread. */
+#define WORKERS_DISTRIBUTE_GRAY_QUEUE (&gray_queue)
-#define WORKERS_DISTRIBUTE_GRAY_QUEUE (sgen_collection_is_parallel () ? sgen_workers_get_distribute_gray_queue () : &gray_queue)
-
+/*
+ * The gray queue a worker job must use. If we're not parallel or
+ * concurrent, we use the main gray queue.
+ */
static SgenGrayQueue*
sgen_workers_get_job_gray_queue (WorkerData *worker_data)
{
return worker_data ? &worker_data->private_gray_queue : WORKERS_DISTRIBUTE_GRAY_QUEUE;
}
+static gboolean have_non_collection_major_object_remembers = FALSE;
+
+/*
+ * Remembers a major-heap object so the concurrent mark will process
+ * it, by enqueuing it on remember_major_objects_gray_queue.  Returns
+ * FALSE (doing nothing) if the major collector is not concurrent or
+ * no concurrent collection is in progress; TRUE otherwise.
+ * Only legal outside an old-generation collection: the assert below
+ * admits nursery collections (GENERATION_NURSERY) or no collection
+ * at all (-1).
+ */
+gboolean
+sgen_remember_major_object_for_concurrent_mark (char *obj)
+{
+ if (!major_collector.is_concurrent)
+ return FALSE;
+
+ g_assert (current_collection_generation == GENERATION_NURSERY || current_collection_generation == -1);
+
+ if (!concurrent_collection_in_progress)
+ return FALSE;
+
+ GRAY_OBJECT_ENQUEUE (&remember_major_objects_gray_queue, obj);
+
+ if (current_collection_generation != GENERATION_NURSERY) {
+ /*
+ * This happens when the mutator allocates large or
+ * pinned objects or when allocating in degraded
+ * mode.
+ */
+ have_non_collection_major_object_remembers = TRUE;
+ }
+
+ return TRUE;
+}
+
+/*
+ * Drains all gray-queue sections from `queue` into the section gray
+ * queue stashed in queue->alloc_prepare_data, then wakes up all
+ * workers if anything was moved and the workers have already been
+ * started.  Legal only during a concurrent collection or a parallel
+ * old-generation collection (asserted below).
+ */
+static void
+gray_queue_redirect (SgenGrayQueue *queue)
+{
+ gboolean wake = FALSE;
+
+
+ for (;;) {
+ GrayQueueSection *section = sgen_gray_object_dequeue_section (queue);
+ if (!section)
+ break;
+ /* alloc_prepare_data holds the destination section gray queue. */
+ sgen_section_gray_queue_enqueue (queue->alloc_prepare_data, section);
+ wake = TRUE;
+ }
+
+ if (wake) {
+ g_assert (concurrent_collection_in_progress ||
+ (current_collection_generation == GENERATION_OLD && major_collector.is_parallel));
+ if (sgen_workers_have_started ()) {
+ sgen_workers_wake_up_all ();
+ } else {
+ /* Workers not started yet: a concurrent redirect may only
+ * happen outside a collection. */
+ if (concurrent_collection_in_progress)
+ g_assert (current_collection_generation == -1);
+ }
+ }
+}
+
+/*
+ * Hands the remembered-major-objects queue over to the workers'
+ * section gray queue and clears the pending-remembers flag.
+ */
+static void
+redirect_major_object_remembers (void)
+{
+ gray_queue_redirect (&remember_major_objects_gray_queue);
+ have_non_collection_major_object_remembers = FALSE;
+}
+
static gboolean
is_xdomain_ref_allowed (gpointer *ptr, char *obj, MonoDomain *domain)
{
major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL);
for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
- scan_object_for_xdomain_refs (bigobj->data, bigobj->size, NULL);
+ scan_object_for_xdomain_refs (bigobj->data, sgen_los_object_size (bigobj), NULL);
}
static gboolean
* usage.
*/
gboolean
-sgen_drain_gray_stack (GrayQueue *queue, int max_objs)
+sgen_drain_gray_stack (int max_objs, ScanCopyContext ctx)
{
char *obj;
- ScanObjectFunc scan_func = current_object_ops.scan_object;
+ ScanObjectFunc scan_func = ctx.scan_func;
+ GrayQueue *queue = ctx.queue;
if (max_objs == -1) {
for (;;) {
* pinned objects. Return the number of pinned objects.
*/
static int
-pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, GrayQueue *queue)
+pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, ScanCopyContext ctx)
{
void *last = NULL;
int count = 0;
void *addr;
int idx;
void **definitely_pinned = start;
+ ScanObjectFunc scan_func = ctx.scan_func;
+ SgenGrayQueue *queue = ctx.queue;
sgen_nursery_allocator_prepare_for_pinning ();
MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (search_start);
MONO_GC_OBJ_PINNED ((mword)search_start, sgen_safe_object_get_size (search_start), vt->klass->name_space, vt->klass->name, gen);
}
- pin_object (search_start);
- GRAY_OBJECT_ENQUEUE (queue, search_start);
- if (G_UNLIKELY (do_pin_stats))
- sgen_pin_stats_register_object (search_start, last_obj_size);
- definitely_pinned [count] = search_start;
- count++;
+ if (scan_func) {
+ scan_func (search_start, queue);
+ } else {
+ pin_object (search_start);
+ GRAY_OBJECT_ENQUEUE (queue, search_start);
+ if (G_UNLIKELY (do_pin_stats))
+ sgen_pin_stats_register_object (search_start, last_obj_size);
+ definitely_pinned [count] = search_start;
+ count++;
+ }
break;
}
}
}
void
-sgen_pin_objects_in_section (GCMemSection *section, GrayQueue *queue)
+sgen_pin_objects_in_section (GCMemSection *section, ScanCopyContext ctx)
{
int num_entries = section->pin_queue_num_entries;
if (num_entries) {
void **start = section->pin_queue_start;
int reduced_to;
reduced_to = pin_objects_from_addresses (section, start, start + num_entries,
- section->data, section->next_data, queue);
+ section->data, section->next_data, ctx);
section->pin_queue_num_entries = reduced_to;
if (!reduced_to)
section->pin_queue_start = NULL;
void
sgen_pin_object (void *object, GrayQueue *queue)
{
+ g_assert (!concurrent_collection_in_progress);
+
if (sgen_collection_is_parallel ()) {
LOCK_PIN_QUEUE;
/*object arrives pinned*/
scan_thread_data (start_nursery, end_nursery, FALSE, queue);
}
+/*
+ * Dequeues every object from `queue` and clears its pin bit.
+ */
+static void
+unpin_objects_from_queue (SgenGrayQueue *queue)
+{
+ for (;;) {
+ char *addr;
+ GRAY_OBJECT_DEQUEUE (queue, addr);
+ if (!addr)
+ break;
+ SGEN_UNPIN_OBJECT (addr);
+ }
+}
+
typedef struct {
CopyOrMarkObjectFunc func;
GrayQueue *queue;
* This function is not thread-safe!
*/
static void
-precisely_scan_objects_from (CopyOrMarkObjectFunc copy_func, void** start_root, void** end_root, char* n_start, char *n_end, mword desc, GrayQueue *queue)
+precisely_scan_objects_from (void** start_root, void** end_root, char* n_start, char *n_end, mword desc, ScanCopyContext ctx)
{
+ CopyOrMarkObjectFunc copy_func = ctx.copy_func;
+ SgenGrayQueue *queue = ctx.queue;
+
switch (desc & ROOT_DESC_TYPE_MASK) {
case ROOT_DESC_BITMAP:
desc >>= ROOT_DESC_TYPE_SHIFT;
if ((desc & 1) && *start_root) {
copy_func (start_root, queue);
SGEN_LOG (9, "Overwrote root at %p with %p", start_root, *start_root);
- sgen_drain_gray_stack (queue, -1);
+ sgen_drain_gray_stack (-1, ctx);
}
desc >>= 1;
start_root++;
if ((bmap & 1) && *objptr) {
copy_func (objptr, queue);
SGEN_LOG (9, "Overwrote root at %p with %p", objptr, *objptr);
- sgen_drain_gray_stack (queue, -1);
+ sgen_drain_gray_stack (-1, ctx);
}
bmap >>= 1;
++objptr;
}
static void
-scan_finalizer_entries (CopyOrMarkObjectFunc copy_func, FinalizeReadyEntry *list, GrayQueue *queue)
+scan_finalizer_entries (FinalizeReadyEntry *list, ScanCopyContext ctx)
{
+ CopyOrMarkObjectFunc copy_func = ctx.copy_func;
+ SgenGrayQueue *queue = ctx.queue;
FinalizeReadyEntry *fin;
for (fin = list; fin; fin = fin->next) {
TV_DECLARE (btv);
int done_with_ephemerons, ephemeron_rounds = 0;
CopyOrMarkObjectFunc copy_func = current_object_ops.copy_or_mark_object;
+ ScanObjectFunc scan_func = current_object_ops.scan_object;
+ ScanCopyContext ctx = { scan_func, copy_func, queue };
/*
* We copied all the reachable objects. Now it's the time to copy
* To achieve better cache locality and cache usage, we drain the gray stack
* frequently, after each object is copied, and just finish the work here.
*/
- sgen_drain_gray_stack (queue, -1);
+ sgen_drain_gray_stack (-1, ctx);
TV_GETTIME (atv);
SGEN_LOG (2, "%s generation done", generation_name (generation));
*/
done_with_ephemerons = 0;
do {
- done_with_ephemerons = mark_ephemerons_in_range (copy_func, start_addr, end_addr, queue);
- sgen_drain_gray_stack (queue, -1);
+ done_with_ephemerons = mark_ephemerons_in_range (start_addr, end_addr, ctx);
+ sgen_drain_gray_stack (-1, ctx);
++ephemeron_rounds;
} while (!done_with_ephemerons);
- sgen_scan_togglerefs (copy_func, start_addr, end_addr, queue);
+ sgen_scan_togglerefs (start_addr, end_addr, ctx);
if (generation == GENERATION_OLD)
- sgen_scan_togglerefs (copy_func, sgen_get_nursery_start (), sgen_get_nursery_end (), queue);
+ sgen_scan_togglerefs (sgen_get_nursery_start (), sgen_get_nursery_end (), ctx);
if (sgen_need_bridge_processing ()) {
- sgen_collect_bridge_objects (copy_func, start_addr, end_addr, generation, queue);
+ sgen_collect_bridge_objects (start_addr, end_addr, generation, ctx);
if (generation == GENERATION_OLD)
- sgen_collect_bridge_objects (copy_func, sgen_get_nursery_start (), sgen_get_nursery_end (), GENERATION_NURSERY, queue);
+ sgen_collect_bridge_objects (sgen_get_nursery_start (), sgen_get_nursery_end (), GENERATION_NURSERY, ctx);
}
/*
Make sure we drain the gray stack before processing disappearing links and finalizers.
If we don't make sure it is empty we might wrongly see a live object as dead.
*/
- sgen_drain_gray_stack (queue, -1);
+ sgen_drain_gray_stack (-1, ctx);
/*
We must clear weak links that don't track resurrection before processing object ready for
finalization so they can be cleared before that.
*/
- sgen_null_link_in_range (copy_func, start_addr, end_addr, generation, TRUE, queue);
+ sgen_null_link_in_range (start_addr, end_addr, generation, TRUE, ctx);
if (generation == GENERATION_OLD)
- sgen_null_link_in_range (copy_func, start_addr, end_addr, GENERATION_NURSERY, TRUE, queue);
+ sgen_null_link_in_range (start_addr, end_addr, GENERATION_NURSERY, TRUE, ctx);
/* walk the finalization queue and move also the objects that need to be
* on are also not reclaimed. As with the roots above, only objects in the nursery
* are marked/copied.
*/
- sgen_finalize_in_range (copy_func, start_addr, end_addr, generation, queue);
+ sgen_finalize_in_range (start_addr, end_addr, generation, ctx);
if (generation == GENERATION_OLD)
- sgen_finalize_in_range (copy_func, sgen_get_nursery_start (), sgen_get_nursery_end (), GENERATION_NURSERY, queue);
+ sgen_finalize_in_range (sgen_get_nursery_start (), sgen_get_nursery_end (), GENERATION_NURSERY, ctx);
/* drain the new stack that might have been created */
SGEN_LOG (6, "Precise scan of gray area post fin");
- sgen_drain_gray_stack (queue, -1);
+ sgen_drain_gray_stack (-1, ctx);
/*
* This must be done again after processing finalizable objects since CWL slots are cleared only after the key is finalized.
*/
done_with_ephemerons = 0;
do {
- done_with_ephemerons = mark_ephemerons_in_range (copy_func, start_addr, end_addr, queue);
- sgen_drain_gray_stack (queue, -1);
+ done_with_ephemerons = mark_ephemerons_in_range (start_addr, end_addr, ctx);
+ sgen_drain_gray_stack (-1, ctx);
++ephemeron_rounds;
} while (!done_with_ephemerons);
* Clear ephemeron pairs with unreachable keys.
* We pass the copy func so we can figure out if an array was promoted or not.
*/
- clear_unreachable_ephemerons (copy_func, start_addr, end_addr, queue);
+ clear_unreachable_ephemerons (start_addr, end_addr, ctx);
TV_GETTIME (btv);
SGEN_LOG (2, "Finalize queue handling scan for %s generation: %d usecs %d ephemeron rounds", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds);
*/
g_assert (sgen_gray_object_queue_is_empty (queue));
for (;;) {
- sgen_null_link_in_range (copy_func, start_addr, end_addr, generation, FALSE, queue);
+ sgen_null_link_in_range (start_addr, end_addr, generation, FALSE, ctx);
if (generation == GENERATION_OLD)
- sgen_null_link_in_range (copy_func, start_addr, end_addr, GENERATION_NURSERY, FALSE, queue);
+ sgen_null_link_in_range (start_addr, end_addr, GENERATION_NURSERY, FALSE, ctx);
if (sgen_gray_object_queue_is_empty (queue))
break;
- sgen_drain_gray_stack (queue, -1);
+ sgen_drain_gray_stack (-1, ctx);
}
g_assert (sgen_gray_object_queue_is_empty (queue));
}
static void
-scan_from_registered_roots (CopyOrMarkObjectFunc copy_func, char *addr_start, char *addr_end, int root_type, GrayQueue *queue)
+scan_from_registered_roots (char *addr_start, char *addr_end, int root_type, ScanCopyContext ctx)
{
void **start_root;
RootRecord *root;
SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
- precisely_scan_objects_from (copy_func, start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc, queue);
+ precisely_scan_objects_from (start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc, ctx);
} SGEN_HASH_TABLE_FOREACH_END;
}
}
}
+/*
+ * Whether the collection currently in progress is concurrent:
+ * nursery collections never are; old-generation collections are iff
+ * the major collector is concurrent.  Calling this with no collection
+ * in progress (generation -1) is a fatal error.
+ */
+gboolean
+sgen_collection_is_concurrent (void)
+{
+ switch (current_collection_generation) {
+ case GENERATION_NURSERY:
+ return FALSE;
+ case GENERATION_OLD:
+ return major_collector.is_concurrent;
+ default:
+ g_error ("Invalid current generation %d", current_collection_generation);
+ }
+}
+
+/* Whether a concurrent major collection is currently in progress. */
+gboolean
+sgen_concurrent_collection_in_progress (void)
+{
+ return concurrent_collection_in_progress;
+}
+
typedef struct
{
char *heap_start;
FinishRememberedSetScanJobData *job_data = job_data_untyped;
remset.finish_scan_remsets (job_data->heap_start, job_data->heap_end, sgen_workers_get_job_gray_queue (worker_data));
+ sgen_free_internal_dynamic (job_data, sizeof (FinishRememberedSetScanJobData), INTERNAL_MEM_WORKER_JOB_DATA);
}
typedef struct
{
- CopyOrMarkObjectFunc func;
+ CopyOrMarkObjectFunc copy_or_mark_func;
+ ScanObjectFunc scan_func;
char *heap_start;
char *heap_end;
int root_type;
job_scan_from_registered_roots (WorkerData *worker_data, void *job_data_untyped)
{
ScanFromRegisteredRootsJobData *job_data = job_data_untyped;
+ ScanCopyContext ctx = { job_data->scan_func, job_data->copy_or_mark_func,
+ sgen_workers_get_job_gray_queue (worker_data) };
- scan_from_registered_roots (job_data->func,
- job_data->heap_start, job_data->heap_end,
- job_data->root_type,
- sgen_workers_get_job_gray_queue (worker_data));
+ scan_from_registered_roots (job_data->heap_start, job_data->heap_end, job_data->root_type, ctx);
+ sgen_free_internal_dynamic (job_data, sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA);
}
typedef struct
scan_thread_data (job_data->heap_start, job_data->heap_end, TRUE,
sgen_workers_get_job_gray_queue (worker_data));
+ sgen_free_internal_dynamic (job_data, sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA);
}
typedef struct
job_scan_finalizer_entries (WorkerData *worker_data, void *job_data_untyped)
{
ScanFinalizerEntriesJobData *job_data = job_data_untyped;
+ ScanCopyContext ctx = { NULL, current_object_ops.copy_or_mark_object, sgen_workers_get_job_gray_queue (worker_data) };
- scan_finalizer_entries (current_object_ops.copy_or_mark_object,
- job_data->list,
- sgen_workers_get_job_gray_queue (worker_data));
+ scan_finalizer_entries (job_data->list, ctx);
+ sgen_free_internal_dynamic (job_data, sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA);
+}
+
+/*
+ * Worker job: scans the major heap's card table -- the TRUE argument
+ * presumably selects the mod-union table (verify against
+ * scan_card_table's signature) -- into this worker's gray queue.
+ * Only valid while a concurrent collection is in progress.
+ * worker_data/job_data_untyped follow the common job signature;
+ * job_data_untyped is unused here.
+ */
+static void
+job_scan_major_mod_union_cardtable (WorkerData *worker_data, void *job_data_untyped)
+{
+ g_assert (concurrent_collection_in_progress);
+ major_collector.scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
+}
+
+/*
+ * Worker job: scans the large-object-space card table -- the TRUE
+ * argument presumably selects the mod-union table -- into this
+ * worker's gray queue.  Only valid while a concurrent collection is
+ * in progress.  job_data_untyped is unused here.
+ */
+static void
+job_scan_los_mod_union_cardtable (WorkerData *worker_data, void *job_data_untyped)
+{
+ g_assert (concurrent_collection_in_progress);
+ sgen_los_scan_card_table (TRUE, sgen_workers_get_job_gray_queue (worker_data));
 }
static void
}
}
+/*
+ * Checks that no objects in the nursery are forwarded or pinned. This
+ * is a precondition to restarting the mutator while doing a
+ * concurrent collection. Note that we don't clear fragments because
+ * we depend on that having happened earlier.
+ */
+static void
+check_nursery_is_clean (void)
+{
+ char *end, *cur;
+
+ cur = sgen_get_nursery_start ();
+ end = sgen_get_nursery_end ();
+
+ while (cur < end) {
+ size_t size;
+
+ /* Skip zeroed (unallocated) memory one word at a time. */
+ if (!*(void**)cur) {
+ cur += sizeof (void*);
+ continue;
+ }
+
+ g_assert (!object_is_forwarded (cur));
+ g_assert (!object_is_pinned (cur));
+
+ /* Compute the object size once -- the previous version called
+ * safe_object_get_size() twice and left the first result in an
+ * unused variable. */
+ size = ALIGN_UP (safe_object_get_size ((MonoObject*)cur));
+ verify_scan_starts (cur, cur + size);
+
+ cur += size;
+ }
+}
+
+/*
+ * Initializes the collection gray queues.  For parallel or concurrent
+ * collections the main gray queue redirects full sections to the
+ * workers' distribute section gray queue via gray_queue_redirect;
+ * otherwise it is a plain local queue.  The remember-major-objects
+ * queue is only usable when the major collector is concurrent and is
+ * initialized as invalid otherwise.
+ */
+static void
+init_gray_queue (void)
+{
+ if (sgen_collection_is_parallel () || sgen_collection_is_concurrent ()) {
+ sgen_workers_init_distribute_gray_queue ();
+ sgen_gray_object_queue_init_with_alloc_prepare (&gray_queue, NULL,
+ gray_queue_redirect, sgen_workers_get_distribute_section_gray_queue ());
+ } else {
+ sgen_gray_object_queue_init (&gray_queue, NULL);
+ }
+
+ if (major_collector.is_concurrent) {
+ sgen_gray_object_queue_init_with_alloc_prepare (&remember_major_objects_gray_queue, NULL,
+ gray_queue_redirect, sgen_workers_get_distribute_section_gray_queue ());
+ } else {
+ sgen_gray_object_queue_init_invalid (&remember_major_objects_gray_queue);
+ }
+}
+
/*
* Collect objects in the nursery. Returns whether to trigger a major
* collection.
*/
static gboolean
-collect_nursery (void)
+collect_nursery (SgenGrayQueue *unpin_queue)
{
gboolean needs_major;
size_t max_garbage_amount;
char *nursery_next;
- FinishRememberedSetScanJobData frssjd;
- ScanFromRegisteredRootsJobData scrrjd_normal, scrrjd_wbarrier;
- ScanFinalizerEntriesJobData sfejd_fin_ready, sfejd_critical_fin;
- ScanThreadDataJobData stdjd;
+ FinishRememberedSetScanJobData *frssjd;
+ ScanFromRegisteredRootsJobData *scrrjd_normal, *scrrjd_wbarrier;
+ ScanFinalizerEntriesJobData *sfejd_fin_ready, *sfejd_critical_fin;
+ ScanThreadDataJobData *stdjd;
mword fragment_total;
+ ScanCopyContext ctx;
TV_DECLARE (all_atv);
TV_DECLARE (all_btv);
TV_DECLARE (atv);
return TRUE;
MONO_GC_BEGIN (GENERATION_NURSERY);
+ binary_protocol_collection_begin (stat_minor_gcs, GENERATION_NURSERY);
verify_nursery ();
reset_pinned_from_failed_allocation ();
- binary_protocol_collection (stat_minor_gcs, GENERATION_NURSERY);
check_scan_starts ();
sgen_nursery_alloc_prepare_for_minor ();
sgen_memgov_minor_collection_start ();
- sgen_gray_object_queue_init (&gray_queue);
- sgen_workers_init_distribute_gray_queue ();
+ init_gray_queue ();
stat_minor_gcs++;
gc_stats.minor_gc_count ++;
/* identify pinned objects */
sgen_optimize_pin_queue (0);
sgen_pinning_setup_section (nursery_section);
- sgen_pin_objects_in_section (nursery_section, WORKERS_DISTRIBUTE_GRAY_QUEUE);
+ ctx.scan_func = NULL;
+ ctx.copy_func = NULL;
+ ctx.queue = WORKERS_DISTRIBUTE_GRAY_QUEUE;
+ sgen_pin_objects_in_section (nursery_section, ctx);
sgen_pinning_trim_queue_to_section (nursery_section);
TV_GETTIME (atv);
SGEN_LOG (2, "Finding pinned pointers: %d in %d usecs", sgen_get_pinned_count (), TV_ELAPSED (btv, atv));
SGEN_LOG (4, "Start scan with %d pinned objects", sgen_get_pinned_count ());
- if (whole_heap_check_before_collection)
+ if (whole_heap_check_before_collection) {
+ sgen_clear_nursery_fragments ();
sgen_check_whole_heap ();
+ }
if (consistency_check_at_minor_collection)
sgen_check_consistency ();
sgen_workers_start_marking ();
- frssjd.heap_start = sgen_get_nursery_start ();
- frssjd.heap_end = nursery_next;
- sgen_workers_enqueue_job (job_finish_remembered_set_scan, &frssjd);
+ frssjd = sgen_alloc_internal_dynamic (sizeof (FinishRememberedSetScanJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
+ frssjd->heap_start = sgen_get_nursery_start ();
+ frssjd->heap_end = nursery_next;
+ sgen_workers_enqueue_job (job_finish_remembered_set_scan, frssjd);
/* we don't have complete write barrier yet, so we scan all the old generation sections */
TV_GETTIME (btv);
time_minor_scan_remsets += TV_ELAPSED (atv, btv);
SGEN_LOG (2, "Old generation scan: %d usecs", TV_ELAPSED (atv, btv));
- if (!sgen_collection_is_parallel ())
- sgen_drain_gray_stack (&gray_queue, -1);
+ if (!sgen_collection_is_parallel ()) {
+ ctx.scan_func = current_object_ops.scan_object;
+ ctx.copy_func = NULL;
+ ctx.queue = &gray_queue;
+ sgen_drain_gray_stack (-1, ctx);
+ }
if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
report_registered_roots ();
time_minor_scan_pinned += TV_ELAPSED (btv, atv);
/* registered roots, this includes static fields */
- scrrjd_normal.func = current_object_ops.copy_or_mark_object;
- scrrjd_normal.heap_start = sgen_get_nursery_start ();
- scrrjd_normal.heap_end = nursery_next;
- scrrjd_normal.root_type = ROOT_TYPE_NORMAL;
- sgen_workers_enqueue_job (job_scan_from_registered_roots, &scrrjd_normal);
-
- scrrjd_wbarrier.func = current_object_ops.copy_or_mark_object;
- scrrjd_wbarrier.heap_start = sgen_get_nursery_start ();
- scrrjd_wbarrier.heap_end = nursery_next;
- scrrjd_wbarrier.root_type = ROOT_TYPE_WBARRIER;
- sgen_workers_enqueue_job (job_scan_from_registered_roots, &scrrjd_wbarrier);
+ scrrjd_normal = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
+ scrrjd_normal->copy_or_mark_func = current_object_ops.copy_or_mark_object;
+ scrrjd_normal->scan_func = current_object_ops.scan_object;
+ scrrjd_normal->heap_start = sgen_get_nursery_start ();
+ scrrjd_normal->heap_end = nursery_next;
+ scrrjd_normal->root_type = ROOT_TYPE_NORMAL;
+ sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_normal);
+
+ scrrjd_wbarrier = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
+ scrrjd_wbarrier->copy_or_mark_func = current_object_ops.copy_or_mark_object;
+ scrrjd_wbarrier->scan_func = current_object_ops.scan_object;
+ scrrjd_wbarrier->heap_start = sgen_get_nursery_start ();
+ scrrjd_wbarrier->heap_end = nursery_next;
+ scrrjd_wbarrier->root_type = ROOT_TYPE_WBARRIER;
+ sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_wbarrier);
TV_GETTIME (btv);
time_minor_scan_registered_roots += TV_ELAPSED (atv, btv);
/* thread data */
- stdjd.heap_start = sgen_get_nursery_start ();
- stdjd.heap_end = nursery_next;
- sgen_workers_enqueue_job (job_scan_thread_data, &stdjd);
+ stdjd = sgen_alloc_internal_dynamic (sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
+ stdjd->heap_start = sgen_get_nursery_start ();
+ stdjd->heap_end = nursery_next;
+ sgen_workers_enqueue_job (job_scan_thread_data, stdjd);
TV_GETTIME (atv);
time_minor_scan_thread_data += TV_ELAPSED (btv, atv);
btv = atv;
- if (sgen_collection_is_parallel ()) {
- while (!sgen_gray_object_queue_is_empty (WORKERS_DISTRIBUTE_GRAY_QUEUE)) {
- sgen_workers_distribute_gray_queue_sections ();
- g_usleep (1000);
- }
- }
- sgen_workers_join ();
+ g_assert (!sgen_collection_is_parallel () && !sgen_collection_is_concurrent ());
- if (sgen_collection_is_parallel ())
+ if (sgen_collection_is_parallel () || sgen_collection_is_concurrent ())
g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
/* Scan the list of objects ready for finalization. If */
- sfejd_fin_ready.list = fin_ready_list;
- sgen_workers_enqueue_job (job_scan_finalizer_entries, &sfejd_fin_ready);
+ sfejd_fin_ready = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
+ sfejd_fin_ready->list = fin_ready_list;
+ sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_fin_ready);
- sfejd_critical_fin.list = critical_fin_list;
- sgen_workers_enqueue_job (job_scan_finalizer_entries, &sfejd_critical_fin);
+ sfejd_critical_fin = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
+ sfejd_critical_fin->list = critical_fin_list;
+ sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_critical_fin);
finish_gray_stack (sgen_get_nursery_start (), nursery_next, GENERATION_NURSERY, &gray_queue);
TV_GETTIME (atv);
* next allocations.
*/
mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_START, 0);
- fragment_total = sgen_build_nursery_fragments (nursery_section, nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries);
+ fragment_total = sgen_build_nursery_fragments (nursery_section,
+ nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries,
+ unpin_queue);
if (!fragment_total)
degraded_mode = 1;
objects_pinned = 0;
MONO_GC_END (GENERATION_NURSERY);
+ binary_protocol_collection_end (stat_minor_gcs - 1, GENERATION_NURSERY);
return needs_major;
}
-static gboolean
-major_do_collection (const char *reason)
+static void
+major_copy_or_mark_from_roots (int *old_next_pin_slot, gboolean finish_up_concurrent_mark, gboolean scan_mod_union)
{
- LOSObject *bigobj, *prevbo;
- TV_DECLARE (all_atv);
- TV_DECLARE (all_btv);
+ LOSObject *bigobj;
TV_DECLARE (atv);
TV_DECLARE (btv);
/* FIXME: only use these values for the precise scan
*/
char *heap_start = NULL;
char *heap_end = (char*)-1;
- int old_next_pin_slot;
- ScanFromRegisteredRootsJobData scrrjd_normal, scrrjd_wbarrier;
- ScanThreadDataJobData stdjd;
- ScanFinalizerEntriesJobData sfejd_fin_ready, sfejd_critical_fin;
-
- MONO_GC_BEGIN (GENERATION_OLD);
-
- current_collection_generation = GENERATION_OLD;
-#ifndef DISABLE_PERFCOUNTERS
- mono_perfcounters->gc_collections1++;
-#endif
-
- current_object_ops = major_collector.major_ops;
-
- reset_pinned_from_failed_allocation ();
-
- sgen_memgov_major_collection_start ();
-
- //count_ref_nonref_objs ();
- //consistency_check ();
-
- binary_protocol_collection (stat_major_gcs, GENERATION_OLD);
- check_scan_starts ();
+ gboolean profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
+ GCRootReport root_report = { 0 };
+ ScanFromRegisteredRootsJobData *scrrjd_normal, *scrrjd_wbarrier;
+ ScanThreadDataJobData *stdjd;
+ ScanFinalizerEntriesJobData *sfejd_fin_ready, *sfejd_critical_fin;
+ ScanCopyContext ctx;
+
+ if (major_collector.is_concurrent) {
+ /*This cleans up unused fragments */
+ sgen_nursery_allocator_prepare_for_pinning ();
+
+ if (do_concurrent_checks)
+ check_nursery_is_clean ();
+ } else {
+ /* The concurrent collector doesn't touch the nursery. */
+ sgen_nursery_alloc_prepare_for_major ();
+ }
- sgen_gray_object_queue_init (&gray_queue);
- sgen_workers_init_distribute_gray_queue ();
- sgen_nursery_alloc_prepare_for_major ();
+ init_gray_queue ();
- degraded_mode = 0;
- SGEN_LOG (1, "Start major collection %d", stat_major_gcs);
- stat_major_gcs++;
- gc_stats.major_gc_count ++;
-
- /* world must be stopped already */
- TV_GETTIME (all_atv);
- atv = all_atv;
+ TV_GETTIME (atv);
/* Pinning depends on this */
sgen_clear_nursery_fragments ();
TV_GETTIME (btv);
time_major_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
- nursery_section->next_data = sgen_get_nursery_end ();
+ if (!sgen_collection_is_concurrent ())
+ nursery_section->next_data = sgen_get_nursery_end ();
/* we should also coalesce scanning from sections close to each other
* and deal with pointers outside of the sections later.
*/
- if (major_collector.start_major_collection)
- major_collector.start_major_collection ();
-
objects_pinned = 0;
*major_collector.have_swept = FALSE;
check_for_xdomain_refs ();
}
- /* Remsets are not useful for a major collection */
- remset.prepare_for_major_collection ();
+ if (!major_collector.is_concurrent) {
+ /* Remsets are not useful for a major collection */
+ remset.prepare_for_major_collection ();
+ }
sgen_process_fin_stage_entries ();
sgen_process_dislink_stage_entries ();
pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address, WORKERS_DISTRIBUTE_GRAY_QUEUE);
sgen_optimize_pin_queue (0);
+ /*
+ * The concurrent collector doesn't move objects, neither on
+ * the major heap nor in the nursery, so we can mark even
+ * before pinning has finished. For the non-concurrent
+ * collector we start the workers after pinning.
+ */
+ if (major_collector.is_concurrent) {
+ sgen_workers_start_all_workers ();
+ sgen_workers_start_marking ();
+ }
+
/*
* pin_queue now contains all candidate pointers, sorted and
* uniqued. We must do two passes now to figure out which
SGEN_LOG (6, "Pinning from large objects");
for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
int dummy;
- gboolean profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
- GCRootReport report;
- report.count = 0;
- if (sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + bigobj->size, &dummy)) {
+ if (sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + sgen_los_object_size (bigobj), &dummy)) {
binary_protocol_pin (bigobj->data, (gpointer)LOAD_VTABLE (bigobj->data), safe_object_get_size (((MonoObject*)(bigobj->data))));
if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (bigobj->data);
MONO_GC_OBJ_PINNED ((mword)bigobj->data, sgen_safe_object_get_size ((MonoObject*)bigobj->data), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
}
- pin_object (bigobj->data);
+ if (sgen_los_object_is_pinned (bigobj->data)) {
+ g_assert (finish_up_concurrent_mark);
+ continue;
+ }
+ sgen_los_pin_object (bigobj->data);
/* FIXME: only enqueue if object has references */
GRAY_OBJECT_ENQUEUE (WORKERS_DISTRIBUTE_GRAY_QUEUE, bigobj->data);
if (G_UNLIKELY (do_pin_stats))
sgen_pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
- SGEN_LOG (6, "Marked large object %p (%s) size: %lu from roots", bigobj->data, safe_name (bigobj->data), (unsigned long)bigobj->size);
-
+ SGEN_LOG (6, "Marked large object %p (%s) size: %lu from roots", bigobj->data, safe_name (bigobj->data), (unsigned long)sgen_los_object_size (bigobj));
+
if (profile_roots)
- add_profile_gc_root (&report, bigobj->data, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
+ add_profile_gc_root (&root_report, bigobj->data, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
}
- if (profile_roots)
- notify_gc_roots (&report);
}
+ if (profile_roots)
+ notify_gc_roots (&root_report);
/* second pass for the sections */
- sgen_pin_objects_in_section (nursery_section, WORKERS_DISTRIBUTE_GRAY_QUEUE);
+ ctx.scan_func = concurrent_collection_in_progress ? current_object_ops.scan_object : NULL;
+ ctx.copy_func = NULL;
+ ctx.queue = WORKERS_DISTRIBUTE_GRAY_QUEUE;
+ sgen_pin_objects_in_section (nursery_section, ctx);
major_collector.pin_objects (WORKERS_DISTRIBUTE_GRAY_QUEUE);
- old_next_pin_slot = sgen_get_pinned_count ();
+ if (old_next_pin_slot)
+ *old_next_pin_slot = sgen_get_pinned_count ();
TV_GETTIME (btv);
time_major_pinning += TV_ELAPSED (atv, btv);
main_gc_thread = mono_native_thread_self ();
#endif
- sgen_workers_start_all_workers ();
- sgen_workers_start_marking ();
+ if (!major_collector.is_concurrent) {
+ sgen_workers_start_all_workers ();
+ sgen_workers_start_marking ();
+ }
if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
report_registered_roots ();
time_major_scan_pinned += TV_ELAPSED (btv, atv);
/* registered roots, this includes static fields */
- scrrjd_normal.func = current_object_ops.copy_or_mark_object;
- scrrjd_normal.heap_start = heap_start;
- scrrjd_normal.heap_end = heap_end;
- scrrjd_normal.root_type = ROOT_TYPE_NORMAL;
- sgen_workers_enqueue_job (job_scan_from_registered_roots, &scrrjd_normal);
-
- scrrjd_wbarrier.func = current_object_ops.copy_or_mark_object;
- scrrjd_wbarrier.heap_start = heap_start;
- scrrjd_wbarrier.heap_end = heap_end;
- scrrjd_wbarrier.root_type = ROOT_TYPE_WBARRIER;
- sgen_workers_enqueue_job (job_scan_from_registered_roots, &scrrjd_wbarrier);
+ scrrjd_normal = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
+ scrrjd_normal->copy_or_mark_func = current_object_ops.copy_or_mark_object;
+ scrrjd_normal->scan_func = current_object_ops.scan_object;
+ scrrjd_normal->heap_start = heap_start;
+ scrrjd_normal->heap_end = heap_end;
+ scrrjd_normal->root_type = ROOT_TYPE_NORMAL;
+ sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_normal);
+
+ scrrjd_wbarrier = sgen_alloc_internal_dynamic (sizeof (ScanFromRegisteredRootsJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
+ scrrjd_wbarrier->copy_or_mark_func = current_object_ops.copy_or_mark_object;
+ scrrjd_wbarrier->scan_func = current_object_ops.scan_object;
+ scrrjd_wbarrier->heap_start = heap_start;
+ scrrjd_wbarrier->heap_end = heap_end;
+ scrrjd_wbarrier->root_type = ROOT_TYPE_WBARRIER;
+ sgen_workers_enqueue_job (job_scan_from_registered_roots, scrrjd_wbarrier);
TV_GETTIME (btv);
time_major_scan_registered_roots += TV_ELAPSED (atv, btv);
/* Threads */
- stdjd.heap_start = heap_start;
- stdjd.heap_end = heap_end;
- sgen_workers_enqueue_job (job_scan_thread_data, &stdjd);
+ stdjd = sgen_alloc_internal_dynamic (sizeof (ScanThreadDataJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
+ stdjd->heap_start = heap_start;
+ stdjd->heap_end = heap_end;
+ sgen_workers_enqueue_job (job_scan_thread_data, stdjd);
TV_GETTIME (atv);
time_major_scan_thread_data += TV_ELAPSED (btv, atv);
report_finalizer_roots ();
/* scan the list of objects ready for finalization */
- sfejd_fin_ready.list = fin_ready_list;
- sgen_workers_enqueue_job (job_scan_finalizer_entries, &sfejd_fin_ready);
+ sfejd_fin_ready = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
+ sfejd_fin_ready->list = fin_ready_list;
+ sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_fin_ready);
+
+ sfejd_critical_fin = sgen_alloc_internal_dynamic (sizeof (ScanFinalizerEntriesJobData), INTERNAL_MEM_WORKER_JOB_DATA, TRUE);
+ sfejd_critical_fin->list = critical_fin_list;
+ sgen_workers_enqueue_job (job_scan_finalizer_entries, sfejd_critical_fin);
- sfejd_critical_fin.list = critical_fin_list;
- sgen_workers_enqueue_job (job_scan_finalizer_entries, &sfejd_critical_fin);
+ if (scan_mod_union) {
+ g_assert (finish_up_concurrent_mark);
+
+ /* Mod union card table */
+ sgen_workers_enqueue_job (job_scan_major_mod_union_cardtable, NULL);
+ sgen_workers_enqueue_job (job_scan_los_mod_union_cardtable, NULL);
+ }
TV_GETTIME (atv);
time_major_scan_finalized += TV_ELAPSED (btv, atv);
TV_GETTIME (btv);
time_major_scan_big_objects += TV_ELAPSED (atv, btv);
- if (major_collector.is_parallel) {
- while (!sgen_gray_object_queue_is_empty (WORKERS_DISTRIBUTE_GRAY_QUEUE)) {
- sgen_workers_distribute_gray_queue_sections ();
- g_usleep (1000);
- }
+ if (major_collector.is_concurrent) {
+ /* prepare the pin queue for the next collection */
+ sgen_finish_pinning ();
+
+ sgen_pin_stats_reset ();
+
+ if (do_concurrent_checks)
+ check_nursery_is_clean ();
+ }
+}
+
+/*
+ * Begin a major (old-generation) collection: emit the begin probes and
+ * binary-protocol entry, set up collection state, and mark/copy from
+ * the roots.
+ *
+ * old_next_pin_slot: out parameter receiving the pin-queue cursor after
+ * pinning; may be NULL (the concurrent start path passes NULL), in
+ * which case it is simply not recorded.
+ */
+static void
+major_start_collection (int *old_next_pin_slot)
+{
+ MONO_GC_BEGIN (GENERATION_OLD);
+ binary_protocol_collection_begin (stat_major_gcs, GENERATION_OLD);
+
+ current_collection_generation = GENERATION_OLD;
+#ifndef DISABLE_PERFCOUNTERS
+ mono_perfcounters->gc_collections1++;
+#endif
+
+ /* No gray sections may be left over from a previous cycle. */
+ g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
+
+ if (major_collector.is_concurrent)
+ concurrent_collection_in_progress = TRUE;
+
+ current_object_ops = major_collector.major_ops;
+
+ reset_pinned_from_failed_allocation ();
+
+ /* Let the memory governor account for this major collection. */
+ sgen_memgov_major_collection_start ();
+
+ //count_ref_nonref_objs ();
+ //consistency_check ();
+
+ check_scan_starts ();
+
+ degraded_mode = 0;
+ SGEN_LOG (1, "Start major collection %d", stat_major_gcs);
+ stat_major_gcs++;
+ gc_stats.major_gc_count ++;
+
+ /* Optional collector-specific setup hook. */
+ if (major_collector.start_major_collection)
+ major_collector.start_major_collection ();
+
+ /* FALSE, FALSE: not finishing a concurrent mark, no mod-union scan. */
+ major_copy_or_mark_from_roots (old_next_pin_slot, FALSE, FALSE);
+}
+
+/*
+ * For parallel/concurrent collectors, hand remaining gray-queue work to
+ * the workers and join them; then assert that all gray queues are
+ * empty.  For a serial collector only the assertions are performed.
+ */
+static void
+wait_for_workers_to_finish (void)
+{
+ g_assert (sgen_gray_object_queue_is_empty (&remember_major_objects_gray_queue));
+
+ if (major_collector.is_parallel || major_collector.is_concurrent) {
+ /* Redirect our gray queue to the workers before joining them. */
+ gray_queue_redirect (&gray_queue);
+ sgen_workers_join ();
}
- sgen_workers_join ();
+
+ g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
#ifdef SGEN_DEBUG_INTERNAL_ALLOC
main_gc_thread = NULL;
#endif
+}
+
+static void
+major_finish_collection (const char *reason, int old_next_pin_slot, gboolean scan_mod_union)
+{
+ LOSObject *bigobj, *prevbo;
+ TV_DECLARE (atv);
+ TV_DECLARE (btv);
+ char *heap_start = NULL;
+ char *heap_end = (char*)-1;
+
+ TV_GETTIME (btv);
+
+ if (major_collector.is_concurrent || major_collector.is_parallel)
+ wait_for_workers_to_finish ();
+
+ current_object_ops = major_collector.major_ops;
+
+ if (major_collector.is_concurrent) {
+ major_copy_or_mark_from_roots (NULL, TRUE, scan_mod_union);
+ wait_for_workers_to_finish ();
- if (major_collector.is_parallel)
g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
+ if (do_concurrent_checks)
+ check_nursery_is_clean ();
+ }
+
+ /*
+ * The workers have stopped so we need to finish gray queue
+ * work that might result from finalization in the main GC
+ * thread. Redirection must therefore be turned off.
+ */
+ sgen_gray_object_queue_disable_alloc_prepare (&gray_queue);
+ g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
+
/* all the objects in the heap */
finish_gray_stack (heap_start, heap_end, GENERATION_OLD, &gray_queue);
TV_GETTIME (atv);
sgen_workers_reset_data ();
if (objects_pinned) {
+ g_assert (!major_collector.is_concurrent);
+
/*This is slow, but we just OOM'd*/
sgen_pin_queue_clear_discarded_entries (nursery_section, old_next_pin_slot);
sgen_optimize_pin_queue (0);
reset_heap_boundaries ();
sgen_update_heap_boundaries ((mword)sgen_get_nursery_start (), (mword)sgen_get_nursery_end ());
+ MONO_GC_SWEEP_BEGIN (GENERATION_OLD, !major_collector.sweeps_lazily);
+
/* sweep the big objects list */
prevbo = NULL;
for (bigobj = los_object_list; bigobj;) {
- if (object_is_pinned (bigobj->data)) {
- unpin_object (bigobj->data);
- sgen_update_heap_boundaries ((mword)bigobj->data, (mword)bigobj->data + bigobj->size);
+ g_assert (!object_is_pinned (bigobj->data));
+ if (sgen_los_object_is_pinned (bigobj->data)) {
+ sgen_los_unpin_object (bigobj->data);
+ sgen_update_heap_boundaries ((mword)bigobj->data, (mword)bigobj->data + sgen_los_object_size (bigobj));
} else {
LOSObject *to_free;
/* not referenced anywhere, so we can free it */
major_collector.sweep ();
+ MONO_GC_SWEEP_END (GENERATION_OLD, !major_collector.sweeps_lazily);
+
TV_GETTIME (btv);
time_major_sweep += TV_ELAPSED (atv, btv);
- /* walk the pin_queue, build up the fragment list of free memory, unmark
- * pinned objects as we go, memzero() the empty fragments so they are ready for the
- * next allocations.
- */
- if (!sgen_build_nursery_fragments (nursery_section, nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries))
- degraded_mode = 1;
+ if (!major_collector.is_concurrent) {
+ /* walk the pin_queue, build up the fragment list of free memory, unmark
+ * pinned objects as we go, memzero() the empty fragments so they are ready for the
+ * next allocations.
+ */
+ if (!sgen_build_nursery_fragments (nursery_section, nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries, NULL))
+ degraded_mode = 1;
- /* Clear TLABs for all threads */
- sgen_clear_tlabs ();
+ /* prepare the pin queue for the next collection */
+ sgen_finish_pinning ();
+
+ /* Clear TLABs for all threads */
+ sgen_clear_tlabs ();
+
+ sgen_pin_stats_reset ();
+ }
TV_GETTIME (atv);
time_major_fragment_creation += TV_ELAPSED (btv, atv);
- TV_GETTIME (all_btv);
- gc_stats.major_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
-
if (heap_dump_file)
dump_heap ("major", stat_major_gcs - 1, reason);
- /* prepare the pin queue for the next collection */
- sgen_finish_pinning ();
-
if (fin_ready_list || critical_fin_list) {
SGEN_LOG (4, "Finalizer-thread wakeup: ready %d", num_ready_finalizers);
mono_gc_finalize_notify ();
}
- sgen_pin_stats_reset ();
g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
major_collector.finish_major_collection ();
+ g_assert (sgen_section_gray_queue_is_empty (sgen_workers_get_distribute_section_gray_queue ()));
+
+ if (major_collector.is_concurrent)
+ concurrent_collection_in_progress = FALSE;
+
check_scan_starts ();
binary_protocol_flush_buffers (FALSE);
//consistency_check ();
MONO_GC_END (GENERATION_OLD);
+ binary_protocol_collection_end (stat_major_gcs - 1, GENERATION_OLD);
+}
+
+/*
+ * Run a full stop-the-world major collection (start + finish) and
+ * account its wall-clock time in gc_stats.
+ *
+ * Returns TRUE if bytes were pinned due to a failed allocation; the
+ * caller treats that as an overflow condition ("Excessive pinning")
+ * and schedules a follow-up nursery collection.
+ */
+static gboolean
+major_do_collection (const char *reason)
+{
+ TV_DECLARE (all_atv);
+ TV_DECLARE (all_btv);
+ int old_next_pin_slot;
+
+ /* The marked-objects counter must start out clean. */
+ if (major_collector.get_and_reset_num_major_objects_marked) {
+ long long num_marked = major_collector.get_and_reset_num_major_objects_marked ();
+ g_assert (!num_marked);
+ }
+
+ /* world must be stopped already */
+ TV_GETTIME (all_atv);
+
+ major_start_collection (&old_next_pin_slot);
+ major_finish_collection (reason, old_next_pin_slot, FALSE);
+
+ TV_GETTIME (all_btv);
+ gc_stats.major_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
+
+ /* FIXME: also report this to the user, preferably in gc-end. */
+ if (major_collector.get_and_reset_num_major_objects_marked)
+ major_collector.get_and_reset_num_major_objects_marked ();
return bytes_pinned_from_failed_allocation > 0;
}
static gboolean major_do_collection (const char *reason);
+/*
+ * Kick off a concurrent major collection: run the stop-the-world start
+ * phase, then hand the gray queue over to the workers and return while
+ * marking continues concurrently.
+ */
+static void
+major_start_concurrent_collection (const char *reason)
+{
+ long long num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
+
+ g_assert (num_objects_marked == 0);
+
+ MONO_GC_CONCURRENT_START_BEGIN (GENERATION_OLD);
+
+ // FIXME: store reason and pass it when finishing
+ /* NULL: the pin-queue cursor is not needed by the concurrent start. */
+ major_start_collection (NULL);
+
+ gray_queue_redirect (&gray_queue);
+ sgen_workers_wait_for_jobs ();
+
+ num_objects_marked = major_collector.get_and_reset_num_major_objects_marked ();
+ MONO_GC_CONCURRENT_START_END (GENERATION_OLD, num_objects_marked);
+
+ /* No collection is running on this thread any more. */
+ current_collection_generation = -1;
+}
+
+/*
+ * Called with the world stopped while a concurrent collection is in
+ * progress.  Updates the mod-union card tables and, when the workers
+ * are done (or force_finish is set), finishes the collection.
+ *
+ * Returns TRUE if the concurrent collection was finished, FALSE if it
+ * was only updated and concurrent marking continues.
+ */
+static gboolean
+major_update_or_finish_concurrent_collection (gboolean force_finish)
+{
+ SgenGrayQueue unpin_queue;
+ memset (&unpin_queue, 0, sizeof (unpin_queue));
+
+ MONO_GC_CONCURRENT_UPDATE_FINISH_BEGIN (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
+
+ g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
+ if (!have_non_collection_major_object_remembers)
+ g_assert (sgen_gray_object_queue_is_empty (&remember_major_objects_gray_queue));
+
+ /* Pick up references recorded in the card tables since last time. */
+ major_collector.update_cardtable_mod_union ();
+ sgen_los_update_cardtable_mod_union ();
+
+ if (!force_finish && !sgen_workers_all_done ()) {
+ /* Workers still marking: this was only an update. */
+ MONO_GC_CONCURRENT_UPDATE_END (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
+ return FALSE;
+ }
+
+ /* Collect the nursery, remembering what it pins in unpin_queue so
+ * those objects can be unpinned after the major finish pass. */
+ collect_nursery (&unpin_queue);
+ redirect_major_object_remembers ();
+
+ current_collection_generation = GENERATION_OLD;
+ /* -1: pin slot unused on this path; TRUE: scan the mod union. */
+ major_finish_collection ("finishing", -1, TRUE);
+
+ unpin_objects_from_queue (&unpin_queue);
+ sgen_gray_object_queue_deinit (&unpin_queue);
+
+ MONO_GC_CONCURRENT_FINISH_END (GENERATION_OLD, major_collector.get_and_reset_num_major_objects_marked ());
+
+ current_collection_generation = -1;
+
+ if (whole_heap_check_before_collection)
+ sgen_check_whole_heap ();
+
+ return TRUE;
+}
+
/*
* Ensure an allocation request for @size will succeed by freeing enough memory.
*
if (generation_to_collect == -1)
return;
- sgen_perform_collection (size, generation_to_collect, reason);
+ sgen_perform_collection (size, generation_to_collect, reason, generation_to_collect == GENERATION_NURSERY);
}
void
-sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason)
+sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason, gboolean wait_to_finish)
{
TV_DECLARE (gc_end);
GGTimingInfo infos [2];
int overflow_generation_to_collect = -1;
+ int oldest_generation_collected = generation_to_collect;
const char *overflow_reason = NULL;
+ g_assert (generation_to_collect == GENERATION_NURSERY || generation_to_collect == GENERATION_OLD);
+
+ if (have_non_collection_major_object_remembers) {
+ g_assert (concurrent_collection_in_progress);
+ redirect_major_object_remembers ();
+ }
+
memset (infos, 0, sizeof (infos));
mono_profiler_gc_event (MONO_GC_EVENT_START, generation_to_collect);
infos [1].generation = -1;
sgen_stop_world (generation_to_collect);
+
+ if (concurrent_collection_in_progress) {
+ if (major_update_or_finish_concurrent_collection (generation_to_collect == GENERATION_OLD)) {
+ oldest_generation_collected = GENERATION_OLD;
+ goto done;
+ }
+ }
+
//FIXME extract overflow reason
if (generation_to_collect == GENERATION_NURSERY) {
- if (collect_nursery ()) {
+ if (collect_nursery (NULL)) {
overflow_generation_to_collect = GENERATION_OLD;
overflow_reason = "Minor overflow";
}
+ if (concurrent_collection_in_progress) {
+ redirect_major_object_remembers ();
+ sgen_workers_wake_up_all ();
+ }
} else {
- if (major_do_collection (reason)) {
- overflow_generation_to_collect = GENERATION_NURSERY;
- overflow_reason = "Excessive pinning";
+ SgenGrayQueue unpin_queue;
+ SgenGrayQueue *unpin_queue_ptr;
+ memset (&unpin_queue, 0, sizeof (unpin_queue));
+
+ if (major_collector.is_concurrent && wait_to_finish)
+ unpin_queue_ptr = &unpin_queue;
+ else
+ unpin_queue_ptr = NULL;
+
+ if (major_collector.is_concurrent) {
+ g_assert (!concurrent_collection_in_progress);
+ collect_nursery (unpin_queue_ptr);
+ }
+
+ if (major_collector.is_concurrent && !wait_to_finish) {
+ major_start_concurrent_collection (reason);
+ // FIXME: set infos[0] properly
+ goto done;
+ } else {
+ if (major_do_collection (reason)) {
+ overflow_generation_to_collect = GENERATION_NURSERY;
+ overflow_reason = "Excessive pinning";
+ }
+ }
+
+ if (unpin_queue_ptr) {
+ unpin_objects_from_queue (unpin_queue_ptr);
+ sgen_gray_object_queue_deinit (unpin_queue_ptr);
}
}
infos [0].total_time = SGEN_TV_ELAPSED (infos [0].total_time, gc_end);
- if (overflow_generation_to_collect != -1) {
+ if (!major_collector.is_concurrent && overflow_generation_to_collect != -1) {
mono_profiler_gc_event (MONO_GC_EVENT_START, overflow_generation_to_collect);
infos [1].generation = overflow_generation_to_collect;
infos [1].reason = overflow_reason;
infos [1].total_time = gc_end;
if (overflow_generation_to_collect == GENERATION_NURSERY)
- collect_nursery ();
+ collect_nursery (NULL);
else
major_do_collection (overflow_reason);
/* keep events symmetric */
mono_profiler_gc_event (MONO_GC_EVENT_END, overflow_generation_to_collect);
+
+ oldest_generation_collected = MAX (oldest_generation_collected, overflow_generation_to_collect);
}
SGEN_LOG (2, "Heap size: %lu, LOS size: %lu", (unsigned long)mono_gc_get_heap_size (), (unsigned long)los_memory_usage);
degraded_mode = 1;
}
- sgen_restart_world (generation_to_collect, infos);
+ done:
+ g_assert (sgen_gray_object_queue_is_empty (&gray_queue));
+ g_assert (sgen_gray_object_queue_is_empty (&remember_major_objects_gray_queue));
+
+ sgen_restart_world (oldest_generation_collected, infos);
mono_profiler_gc_event (MONO_GC_EVENT_END, generation_to_collect);
}
+/*
+ * Liveness predicate: nursery objects delegate to the nursery check;
+ * old-gen objects are alive if pinned or forwarded; large objects
+ * (bigger than SGEN_MAX_SMALL_OBJ_SIZE) are alive if LOS-pinned;
+ * everything else is decided by the major collector.
+ */
static inline gboolean
sgen_is_object_alive (void *object)
{
+ mword objsize;
+
if (ptr_in_nursery (object))
return sgen_nursery_is_object_alive (object);
/* Oldgen objects can be pinned and forwarded too */
if (SGEN_OBJECT_IS_PINNED (object) || SGEN_OBJECT_IS_FORWARDED (object))
return TRUE;
+
+ /*
+ * FIXME: major_collector.is_object_live() also calculates the
+ * size. Avoid the double calculation.
+ */
+ objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)object));
+ if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
+ return sgen_los_object_is_pinned (object);
+
return major_collector.is_object_live (object);
}
sgen_queue_finalization_entry (MonoObject *obj)
{
FinalizeReadyEntry *entry = sgen_alloc_internal (INTERNAL_MEM_FINALIZE_READY_ENTRY);
+ gboolean critical = has_critical_finalizer (obj);
entry->object = obj;
- if (has_critical_finalizer (obj)) {
+ if (critical) {
entry->next = critical_fin_list;
critical_fin_list = entry;
} else {
entry->next = fin_ready_list;
fin_ready_list = entry;
}
+
+ if (G_UNLIKELY (MONO_GC_FINALIZE_ENQUEUE_ENABLED ())) {
+ int gen = sgen_ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD;
+ MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
+ MONO_GC_FINALIZE_ENQUEUE ((mword)obj, sgen_safe_object_get_size (obj),
+ vt->klass->name_space, vt->klass->name, gen, critical);
+ }
}
static inline int
/* LOCKING: requires that the GC lock is held */
static void
-clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue)
+clear_unreachable_ephemerons (char *start, char *end, ScanCopyContext ctx)
{
+ CopyOrMarkObjectFunc copy_func = ctx.copy_func;
+ GrayQueue *queue = ctx.queue;
int was_in_nursery, was_promoted;
EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
MonoArray *array;
/* LOCKING: requires that the GC lock is held */
static int
-mark_ephemerons_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue)
+mark_ephemerons_in_range (char *start, char *end, ScanCopyContext ctx)
{
+ CopyOrMarkObjectFunc copy_func = ctx.copy_func;
+ GrayQueue *queue = ctx.queue;
int nothing_marked = 1;
EphemeronLinkNode *current = ephemeron_list;
MonoArray *array;
void
mono_gc_wbarrier_generic_nostore (gpointer ptr)
{
+ gpointer obj;
+
HEAVY_STAT (++stat_wbarrier_generic_store);
#ifdef XDOMAIN_CHECKS_IN_WBARRIER
}
#endif
- if (*(gpointer*)ptr)
- binary_protocol_wbarrier (ptr, *(gpointer*)ptr, (gpointer)LOAD_VTABLE (*(gpointer*)ptr));
+ obj = *(gpointer*)ptr;
+ if (obj)
+ binary_protocol_wbarrier (ptr, obj, (gpointer)LOAD_VTABLE (obj));
- if (ptr_in_nursery (ptr) || ptr_on_stack (ptr) || !ptr_in_nursery (*(gpointer*)ptr)) {
+ if (ptr_in_nursery (ptr) || ptr_on_stack (ptr)) {
+ SGEN_LOG (8, "Skipping remset at %p", ptr);
+ return;
+ }
+
+ /*
+ * We need to record old->old pointer locations for the
+ * concurrent collector.
+ */
+ if (!ptr_in_nursery (obj) && !concurrent_collection_in_progress) {
SGEN_LOG (8, "Skipping remset at %p", ptr);
return;
}
LOCK_GC;
if (generation > 1)
generation = 1;
- sgen_perform_collection (0, generation, "user request");
+ sgen_perform_collection (0, generation, "user request", TRUE);
UNLOCK_GC;
}
sgen_marksweep_par_init (&major_collector);
} else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-fixed-par")) {
sgen_marksweep_fixed_par_init (&major_collector);
+ } else if (!major_collector_opt || !strcmp (major_collector_opt, "marksweep-conc")) {
+ sgen_marksweep_conc_init (&major_collector);
} else if (!strcmp (major_collector_opt, "copying")) {
sgen_copying_init (&major_collector);
} else {
if (g_str_has_prefix (opt, "wbarrier=")) {
opt = strchr (opt, '=') + 1;
if (strcmp (opt, "remset") == 0) {
+ if (major_collector.is_concurrent) {
+ fprintf (stderr, "The concurrent collector does not support the SSB write barrier.\n");
+ exit (1);
+ }
use_cardtable = FALSE;
} else if (strcmp (opt, "cardtable") == 0) {
if (!use_cardtable) {
if (major_collector.is_parallel)
sgen_workers_init (num_workers);
+ else if (major_collector.is_concurrent)
+ sgen_workers_init (1);
if (major_collector_opt)
g_free (major_collector_opt);
do_scan_starts_check = TRUE;
} else if (!strcmp (opt, "verify-nursery-at-minor-gc")) {
do_verify_nursery = TRUE;
+ } else if (!strcmp (opt, "check-concurrent")) {
+ if (!major_collector.is_concurrent) {
+ fprintf (stderr, "Error: check-concurrent only works with concurrent major collectors.\n");
+ exit (1);
+ }
+ do_concurrent_checks = TRUE;
} else if (!strcmp (opt, "dump-nursery-at-minor-gc")) {
do_dump_nursery_content = TRUE;
} else if (!strcmp (opt, "no-managed-allocator")) {
fprintf (stderr, " disable-minor\n");
fprintf (stderr, " disable-major\n");
fprintf (stderr, " xdomain-checks\n");
+ fprintf (stderr, " check-concurrent\n");
fprintf (stderr, " clear-at-gc\n");
fprintf (stderr, " clear-nursery-at-gc\n");
fprintf (stderr, " check-scan-starts\n");
}
if (major_collector.post_param_init)
- major_collector.post_param_init ();
+ major_collector.post_param_init (&major_collector);
sgen_memgov_init (max_heap, soft_limit, debug_print_allowance, allowance_ratio, save_target);
mono_mb_emit_icon (mb, (mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS);
nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BEQ);
- // if (!ptr_in_nursery (*ptr)) return;
- mono_mb_emit_ldarg (mb, 0);
- mono_mb_emit_byte (mb, CEE_LDIND_I);
- mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
- mono_mb_emit_byte (mb, CEE_SHR_UN);
- mono_mb_emit_icon (mb, (mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS);
- nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BNE_UN);
+ if (!major_collector.is_concurrent) {
+ // if (!ptr_in_nursery (*ptr)) return;
+ mono_mb_emit_ldarg (mb, 0);
+ mono_mb_emit_byte (mb, CEE_LDIND_I);
+ mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
+ mono_mb_emit_byte (mb, CEE_SHR_UN);
+ mono_mb_emit_icon (mb, (mword)sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS);
+ nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BNE_UN);
+ }
#else
int label_continue1, label_continue2;
int dereferenced_var;
mono_mb_emit_byte (mb, CEE_LDIND_I);
mono_mb_emit_stloc (mb, dereferenced_var);
- // if (*ptr < sgen_get_nursery_start ()) return;
- mono_mb_emit_ldloc (mb, dereferenced_var);
- mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
- nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BLT);
+ if (!major_collector.is_concurrent) {
+ // if (*ptr < sgen_get_nursery_start ()) return;
+ mono_mb_emit_ldloc (mb, dereferenced_var);
+ mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_start ());
+ nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BLT);
- // if (*ptr >= sgen_get_nursery_end ()) return;
- mono_mb_emit_ldloc (mb, dereferenced_var);
- mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
- nursery_check_return_labels [2] = mono_mb_emit_branch (mb, CEE_BGE);
+ // if (*ptr >= sgen_get_nursery_end ()) return;
+ mono_mb_emit_ldloc (mb, dereferenced_var);
+ mono_mb_emit_ptr (mb, (gpointer) sgen_get_nursery_end ());
+ nursery_check_return_labels [2] = mono_mb_emit_branch (mb, CEE_BGE);
+ }
#endif
}
void
sgen_major_collector_scan_card_table (SgenGrayQueue *queue)
{
- major_collector.scan_card_table (queue);
+ major_collector.scan_card_table (FALSE, queue);
}
SgenMajorCollector*
*/
#define SGEN_LOAD_VTABLE(addr) ((*(mword*)(addr)) & ~SGEN_VTABLE_BITS_MASK)
-#if SGEN_MAX_DEBUG_LEVEL >= 9
+#if defined(SGEN_GRAY_OBJECT_ENQUEUE) || SGEN_MAX_DEBUG_LEVEL >= 9
#define GRAY_OBJECT_ENQUEUE sgen_gray_object_enqueue
#define GRAY_OBJECT_DEQUEUE(queue,o) ((o) = sgen_gray_object_dequeue ((queue)))
#else
INTERNAL_MEM_MS_BLOCK_INFO,
INTERNAL_MEM_EPHEMERON_LINK,
INTERNAL_MEM_WORKER_DATA,
+ INTERNAL_MEM_WORKER_JOB_DATA,
INTERNAL_MEM_BRIDGE_DATA,
INTERNAL_MEM_BRIDGE_HASH_TABLE,
INTERNAL_MEM_BRIDGE_HASH_TABLE_ENTRY,
INTERNAL_MEM_BRIDGE_ALIVE_HASH_TABLE_ENTRY,
INTERNAL_MEM_JOB_QUEUE_ENTRY,
INTERNAL_MEM_TOGGLEREF_DATA,
+ INTERNAL_MEM_CARDTABLE_MOD_UNION,
INTERNAL_MEM_MAX
};
ObjectList *next;
};
+/*
+ * Callbacks used while walking the object graph: copy/mark an object
+ * reference slot, scan an object's fields, and scan an unboxed value
+ * type (with its descriptor).
+ */
+typedef void (*CopyOrMarkObjectFunc) (void**, SgenGrayQueue*);
+typedef void (*ScanObjectFunc) (char*, SgenGrayQueue*);
+typedef void (*ScanVTypeFunc) (char*, mword desc, SgenGrayQueue*);
+
+/*
+ * Bundles the scan/copy functions with the gray queue they operate on,
+ * so the three can be passed around as a single argument.  Either
+ * function pointer may be NULL when a pass needs only one of them.
+ */
+typedef struct
+{
+ ScanObjectFunc scan_func;
+ CopyOrMarkObjectFunc copy_func;
+ SgenGrayQueue *queue;
+} ScanCopyContext;
+
void sgen_report_internal_mem_usage (void) MONO_INTERNAL;
void sgen_report_pinned_mem_usage (SgenPinnedAllocator *alc) MONO_INTERNAL;
void sgen_dump_internal_mem_usage (FILE *heap_dump_file) MONO_INTERNAL;
void** sgen_find_optimized_pin_queue_area (void *start, void *end, int *num) MONO_INTERNAL;
void sgen_find_section_pin_queue_start_end (GCMemSection *section) MONO_INTERNAL;
-void sgen_pin_objects_in_section (GCMemSection *section, SgenGrayQueue *queue) MONO_INTERNAL;
+void sgen_pin_objects_in_section (GCMemSection *section, ScanCopyContext ctx) MONO_INTERNAL;
void sgen_pin_stats_register_object (char *obj, size_t size);
void sgen_pin_stats_register_global_remset (char *obj);
void sgen_sort_addresses (void **array, int size) MONO_INTERNAL;
void sgen_add_to_global_remset (gpointer ptr) MONO_INTERNAL;
-typedef void (*CopyOrMarkObjectFunc) (void**, SgenGrayQueue*);
-typedef void (*ScanObjectFunc) (char*, SgenGrayQueue*);
-typedef void (*ScanVTypeFunc) (char*, mword desc, SgenGrayQueue*);
-
int sgen_get_current_collection_generation (void) MONO_INTERNAL;
gboolean sgen_collection_is_parallel (void) MONO_INTERNAL;
+gboolean sgen_collection_is_concurrent (void) MONO_INTERNAL;
+gboolean sgen_concurrent_collection_in_progress (void) MONO_INTERNAL;
+
+gboolean sgen_remember_major_object_for_concurrent_mark (char *obj) MONO_INTERNAL;
typedef struct {
CopyOrMarkObjectFunc copy_or_mark_object;
}
typedef struct {
- char* (*alloc_for_promotion) (char *obj, size_t objsize, gboolean has_references);
- char* (*par_alloc_for_promotion) (char *obj, size_t objsize, gboolean has_references);
+ char* (*alloc_for_promotion) (MonoVTable *vtable, char *obj, size_t objsize, gboolean has_references);
+ char* (*par_alloc_for_promotion) (MonoVTable *vtable, char *obj, size_t objsize, gboolean has_references);
SgenObjectOperations serial_ops;
SgenObjectOperations parallel_ops;
struct _SgenMajorCollector {
size_t section_size;
gboolean is_parallel;
+ gboolean is_concurrent;
gboolean supports_cardtable;
+ gboolean sweeps_lazily;
/*
* This is set to TRUE if the sweep for the last major
void* (*alloc_heap) (mword nursery_size, mword nursery_align, int nursery_bits);
gboolean (*is_object_live) (char *obj);
- void* (*alloc_small_pinned_obj) (size_t size, gboolean has_references);
+ void* (*alloc_small_pinned_obj) (MonoVTable *vtable, size_t size, gboolean has_references);
void* (*alloc_degraded) (MonoVTable *vtable, size_t size);
SgenObjectOperations major_ops;
- void* (*alloc_object) (int size, gboolean has_references);
- void* (*par_alloc_object) (int size, gboolean has_references);
+ void* (*alloc_object) (MonoVTable *vtable, int size, gboolean has_references);
+ void* (*par_alloc_object) (MonoVTable *vtable, int size, gboolean has_references);
void (*free_pinned_object) (char *obj, size_t size);
void (*iterate_objects) (gboolean non_pinned, gboolean pinned, IterateObjectCallbackFunc callback, void *data);
void (*free_non_pinned_object) (char *obj, size_t size);
void (*find_pin_queue_start_ends) (SgenGrayQueue *queue);
void (*pin_objects) (SgenGrayQueue *queue);
void (*pin_major_object) (char *obj, SgenGrayQueue *queue);
- void (*scan_card_table) (SgenGrayQueue *queue);
+ void (*scan_card_table) (gboolean mod_union, SgenGrayQueue *queue);
void (*iterate_live_block_ranges) (sgen_cardtable_block_callback callback);
+ void (*update_cardtable_mod_union) (void);
void (*init_to_space) (void);
void (*sweep) (void);
void (*check_scan_starts) (void);
gboolean (*handle_gc_param) (const char *opt);
void (*print_gc_param_usage) (void);
gboolean (*is_worker_thread) (MonoNativeThreadId thread);
- void (*post_param_init) (void);
+ void (*post_param_init) (SgenMajorCollector *collector);
void* (*alloc_worker_data) (void);
void (*init_worker_thread) (void *data);
void (*reset_worker_data) (void *data);
gboolean (*is_valid_object) (char *object);
gboolean (*describe_pointer) (char *pointer);
+ long long (*get_and_reset_num_major_objects_marked) (void);
};
extern SgenMajorCollector major_collector;
void sgen_marksweep_fixed_init (SgenMajorCollector *collector) MONO_INTERNAL;
void sgen_marksweep_par_init (SgenMajorCollector *collector) MONO_INTERNAL;
void sgen_marksweep_fixed_par_init (SgenMajorCollector *collector) MONO_INTERNAL;
+void sgen_marksweep_conc_init (SgenMajorCollector *collector) MONO_INTERNAL;
void sgen_copying_init (SgenMajorCollector *collector) MONO_INTERNAL;
SgenMajorCollector* sgen_get_major_collector (void) MONO_INTERNAL;
void sgen_mark_bridge_object (MonoObject *obj) MONO_INTERNAL;
void sgen_bridge_register_finalized_object (MonoObject *object) MONO_INTERNAL;
-void sgen_scan_togglerefs (CopyOrMarkObjectFunc copy_func, char *start, char *end, SgenGrayQueue *queue) MONO_INTERNAL;
+void sgen_scan_togglerefs (char *start, char *end, ScanCopyContext ctx) MONO_INTERNAL;
void sgen_process_togglerefs (void) MONO_INTERNAL;
typedef mono_bool (*WeakLinkAlivePredicateFunc) (MonoObject*, void*);
void sgen_queue_finalization_entry (MonoObject *obj) MONO_INTERNAL;
const char* sgen_generation_name (int generation) MONO_INTERNAL;
-void sgen_collect_bridge_objects (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, SgenGrayQueue *queue) MONO_INTERNAL;
-void sgen_finalize_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, SgenGrayQueue *queue) MONO_INTERNAL;
-void sgen_null_link_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, gboolean before_finalization, SgenGrayQueue *queue) MONO_INTERNAL;
+void sgen_collect_bridge_objects (char *start, char *end, int generation, ScanCopyContext ctx) MONO_INTERNAL;
+void sgen_finalize_in_range (char *start, char *end, int generation, ScanCopyContext ctx) MONO_INTERNAL;
+void sgen_null_link_in_range (char *start, char *end, int generation, gboolean before_finalization, ScanCopyContext ctx) MONO_INTERNAL;
void sgen_null_links_for_domain (MonoDomain *domain, int generation) MONO_INTERNAL;
void sgen_remove_finalizers_for_domain (MonoDomain *domain, int generation) MONO_INTERNAL;
void sgen_process_fin_stage_entries (void) MONO_INTERNAL;
void sgen_process_dislink_stage_entries (void) MONO_INTERNAL;
void sgen_register_disappearing_link (MonoObject *obj, void **link, gboolean track, gboolean in_gc) MONO_INTERNAL;
+gboolean sgen_drain_gray_stack (int max_objs, ScanCopyContext ctx) MONO_INTERNAL;
+
enum {
SPACE_NURSERY,
SPACE_MAJOR,
void sgen_set_pinned_from_failed_allocation (mword objsize) MONO_INTERNAL;
void sgen_ensure_free_space (size_t size) MONO_INTERNAL;
-void sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason) MONO_INTERNAL;
+void sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason, gboolean wait_to_finish) MONO_INTERNAL;
gboolean sgen_has_critical_method (void) MONO_INTERNAL;
gboolean sgen_is_critical_method (MonoMethod *method) MONO_INTERNAL;
typedef struct _LOSObject LOSObject;
struct _LOSObject {
LOSObject *next;
- mword size; /* this is the object size */
- guint16 huge_object;
- int dummy; /* to have a sizeof (LOSObject) a multiple of ALLOC_ALIGN and data starting at same alignment */
+ mword size; /* this is the object size, lowest bit used for pin/mark */
+ guint8 *cardtable_mod_union; /* only used by the concurrent collector */
+#if SIZEOF_VOID_P < 8
+ mword dummy; /* to align object to sizeof (double) */
+#endif
char data [MONO_ZERO_LEN_ARRAY];
};
gboolean sgen_ptr_is_in_los (char *ptr, char **start) MONO_INTERNAL;
void sgen_los_iterate_objects (IterateObjectCallbackFunc cb, void *user_data) MONO_INTERNAL;
void sgen_los_iterate_live_block_ranges (sgen_cardtable_block_callback callback) MONO_INTERNAL;
-void sgen_los_scan_card_table (SgenGrayQueue *queue) MONO_INTERNAL;
+void sgen_los_scan_card_table (gboolean mod_union, SgenGrayQueue *queue) MONO_INTERNAL;
+void sgen_los_update_cardtable_mod_union (void) MONO_INTERNAL;
void sgen_major_collector_scan_card_table (SgenGrayQueue *queue) MONO_INTERNAL;
gboolean sgen_los_is_valid_object (char *object) MONO_INTERNAL;
gboolean mono_sgen_los_describe_pointer (char *ptr) MONO_INTERNAL;
+LOSObject* sgen_los_header_for_object (char *data) MONO_INTERNAL;
+mword sgen_los_object_size (LOSObject *obj) MONO_INTERNAL;
+void sgen_los_pin_object (char *obj) MONO_INTERNAL;
+void sgen_los_unpin_object (char *obj) MONO_INTERNAL;
+gboolean sgen_los_object_is_pinned (char *obj) MONO_INTERNAL;
+
/* nursery allocator */
void sgen_clear_nursery_fragments (void) MONO_INTERNAL;
void sgen_nursery_allocator_prepare_for_pinning (void) MONO_INTERNAL;
void sgen_nursery_allocator_set_nursery_bounds (char *nursery_start, char *nursery_end) MONO_INTERNAL;
-mword sgen_build_nursery_fragments (GCMemSection *nursery_section, void **start, int num_entries) MONO_INTERNAL;
+mword sgen_build_nursery_fragments (GCMemSection *nursery_section, void **start, int num_entries, SgenGrayQueue *unpin_queue) MONO_INTERNAL;
void sgen_init_nursery_allocator (void) MONO_INTERNAL;
void sgen_nursery_allocator_init_heavy_stats (void) MONO_INTERNAL;
void sgen_alloc_init_heavy_stats (void) MONO_INTERNAL;
#define GRAY_QUEUE_LENGTH_LIMIT 64
+#ifdef SGEN_CHECK_GRAY_OBJECT_SECTIONS
+/*
+ * Debug-only section life-cycle tracking.  STATE_TRANSITION atomically
+ * moves a section from state O to state N and aborts if the section was
+ * not in O — catching double-free / double-enqueue bugs.
+ */
+#define STATE_TRANSITION(s,o,n)	do {					\
+		int __old = (o);					\
+		if (InterlockedCompareExchange ((volatile int*)&(s)->state, (n), __old) != __old) \
+			g_assert_not_reached ();			\
+	} while (0)
+#define STATE_SET(s,v)		(s)->state = (v)
+#define STATE_ASSERT(s,v)	g_assert ((s)->state == (v))
+#else
+/* Checking disabled: the macros compile away to nothing. */
+#define STATE_TRANSITION(s,o,n)
+#define STATE_SET(s,v)
+#define STATE_ASSERT(s,v)
+#endif
+
void
sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue)
{
/* Use the previously allocated queue sections if possible */
section = queue->free_list;
queue->free_list = section->next;
+ STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
} else {
/* Allocate a new section */
section = sgen_alloc_internal (INTERNAL_MEM_GRAY_QUEUE);
+ STATE_SET (section, GRAY_QUEUE_SECTION_STATE_FLOATING);
}
section->end = 0;
+ STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
+
/* Link it with the others */
section->next = queue->first;
queue->first = section;
+/* Return a floating section to the internal allocator. */
void
sgen_gray_object_free_queue_section (GrayQueueSection *section)
{
+	/* Mark FREED first so reuse after free trips the state check. */
+	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_FREED);
	sgen_free_internal (section, INTERNAL_MEM_GRAY_QUEUE);
}
{
SGEN_ASSERT (9, obj, "enqueueing a null object");
//sgen_check_objref (obj);
+
+#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
+ if (queue->enqueue_check_func)
+ queue->enqueue_check_func (obj);
+#endif
+
if (G_UNLIKELY (!queue->first || queue->first->end == SGEN_GRAY_QUEUE_SECTION_SIZE))
sgen_gray_object_alloc_queue_section (queue);
+ STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
SGEN_ASSERT (9, queue->first->end < SGEN_GRAY_QUEUE_SECTION_SIZE, "gray queue %p overflow, first %p, end %d", queue, queue->first, queue->first->end);
queue->first->objects [queue->first->end++] = obj;
-
- SGEN_LOG_DO (9, ++queue->balance);
}
char*
if (sgen_gray_object_queue_is_empty (queue))
return NULL;
+ STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
SGEN_ASSERT (9, queue->first->end, "gray queue %p underflow, first %p, end %d", queue, queue->first, queue->first->end);
obj = queue->first->objects [--queue->first->end];
GrayQueueSection *section = queue->first;
queue->first = section->next;
section->next = queue->free_list;
+
+ STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FREE_LIST);
+
queue->free_list = section;
}
- SGEN_LOG_DO (9, --queue->balance);
-
return obj;
}
section->next = NULL;
+ STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);
+
return section;
}
+/*
+ * Push a whole (floating) section onto the front of QUEUE.  With enqueue
+ * checking enabled, every object already in the section is validated.
+ */
void
sgen_gray_object_enqueue_section (SgenGrayQueue *queue, GrayQueueSection *section)
{
+	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
+
	section->next = queue->first;
	queue->first = section;
+#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
+	if (queue->enqueue_check_func) {
+		int i;
+		for (i = 0; i < section->end; ++i)
+			queue->enqueue_check_func (section->objects [i]);
+	}
+#endif
}
+/*
+ * (Re)initialize an empty gray queue: clear the alloc-prepare hook, install
+ * the debug enqueue-check function, and trim the free list down to at most
+ * GRAY_QUEUE_LENGTH_LIMIT - 1 retained sections, freeing the excess that
+ * accumulated during the last collection.
+ */
void
-sgen_gray_object_queue_init (SgenGrayQueue *queue)
+sgen_gray_object_queue_init (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func)
{
	GrayQueueSection *section, *next;
	int i;
	g_assert (sgen_gray_object_queue_is_empty (queue));
-	SGEN_ASSERT (9, queue->balance == 0, "unbalanced queue on init %d", queue->balance);
+
+	queue->alloc_prepare_func = NULL;
+	queue->alloc_prepare_data = NULL;
+#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
+	queue->enqueue_check_func = enqueue_check_func;
+#endif
	/* Free the extra sections allocated during the last collection */
	i = 0;
-	for (section = queue->free_list; section && i < GRAY_QUEUE_LENGTH_LIMIT - 1; section = section->next)
+	/* Walk past the sections we keep; each must still be on the free list. */
+	for (section = queue->free_list; section && i < GRAY_QUEUE_LENGTH_LIMIT - 1; section = section->next) {
+		STATE_ASSERT (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST);
		i ++;
+	}
	if (!section)
		return;
+	/* Unlink and free everything after the retained prefix. */
	while (section->next) {
		next = section->next;
		section->next = next->next;
+		STATE_TRANSITION (next, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
		sgen_gray_object_free_queue_section (next);
	}
}
+/* Alloc-prepare hook for queues that must never grow; reaching it is a bug. */
+static void
+invalid_prepare_func (SgenGrayQueue *queue)
+{
+	g_assert_not_reached ();
+}
+
+/*
+ * Initialize QUEUE as a queue that may never be enqueued into: any attempt
+ * to allocate a section for it aborts via invalid_prepare_func.
+ */
void
-sgen_gray_object_queue_init_with_alloc_prepare (SgenGrayQueue *queue, GrayQueueAllocPrepareFunc func, void *data)
+sgen_gray_object_queue_init_invalid (SgenGrayQueue *queue)
{
-	sgen_gray_object_queue_init (queue);
-	queue->alloc_prepare_func = func;
+	/* NULL, not FALSE: the second argument is a function pointer, not a boolean. */
+	sgen_gray_object_queue_init (queue, NULL);
+	queue->alloc_prepare_func = invalid_prepare_func;
+	queue->alloc_prepare_data = NULL;
+}
+
+/*
+ * Like sgen_gray_object_queue_init, but also installs ALLOC_PREPARE_FUNC,
+ * which is invoked (with DATA available via alloc_prepare_data) before a
+ * new section is allocated for the queue.
+ */
+void
+sgen_gray_object_queue_init_with_alloc_prepare (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func,
+		GrayQueueAllocPrepareFunc alloc_prepare_func, void *data)
+{
+	sgen_gray_object_queue_init (queue, enqueue_check_func);
+	queue->alloc_prepare_func = alloc_prepare_func;
+	queue->alloc_prepare_data = data;
+}
+/*
+ * Tear down an empty queue, freeing every section still on its free list.
+ * The queue must not contain enqueued sections.
+ */
+void
+sgen_gray_object_queue_deinit (SgenGrayQueue *queue)
+{
+	g_assert (!queue->first);
+	while (queue->free_list) {
+		GrayQueueSection *next = queue->free_list->next;
+		STATE_TRANSITION (queue->free_list, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
+		sgen_gray_object_free_queue_section (queue->free_list);
+		queue->free_list = next;
+	}
+}
+
+/* Clear the alloc-prepare hook so section allocation no longer calls back. */
+void
+sgen_gray_object_queue_disable_alloc_prepare (SgenGrayQueue *queue)
+{
+	queue->alloc_prepare_func = NULL;
+	queue->alloc_prepare_data = NULL;
+}
+
+/* Take the queue mutex, but only if the queue was initialized as locked. */
+static void
+lock_section_queue (SgenSectionGrayQueue *queue)
+{
+	if (!queue->locked)
+		return;
+
+	mono_mutex_lock (&queue->lock);
+}
+
+/* Release the queue mutex; no-op for unlocked queues. */
+static void
+unlock_section_queue (SgenSectionGrayQueue *queue)
+{
+	if (!queue->locked)
+		return;
+
+	mono_mutex_unlock (&queue->lock);
+}
+
+/*
+ * Initialize an empty section queue.  If LOCKED, a recursive mutex guards
+ * enqueue/dequeue so the queue can be shared between threads.
+ */
+void
+sgen_section_gray_queue_init (SgenSectionGrayQueue *queue, gboolean locked, GrayQueueEnqueueCheckFunc enqueue_check_func)
+{
+	g_assert (sgen_section_gray_queue_is_empty (queue));
+
+	queue->locked = locked;
+	if (locked) {
+		mono_mutexattr_t attr;
+		mono_mutexattr_init (&attr);
+		mono_mutexattr_settype (&attr, MONO_MUTEX_RECURSIVE);
+		mono_mutex_init (&queue->lock, &attr);
+		/* The attribute object is not needed once the mutex is initialized. */
+		mono_mutexattr_destroy (&attr);
+	}
+
+#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
+	queue->enqueue_check_func = enqueue_check_func;
+#endif
+}
+
+/* TRUE iff no sections are enqueued.  Caller must hold the lock if needed. */
+gboolean
+sgen_section_gray_queue_is_empty (SgenSectionGrayQueue *queue)
+{
+	return !queue->first;
+}
+
+/*
+ * Pop the front section, returning it in FLOATING state with next cleared,
+ * or NULL if the queue is empty.  Thread-safe when the queue is locked.
+ */
+GrayQueueSection*
+sgen_section_gray_queue_dequeue (SgenSectionGrayQueue *queue)
+{
+	GrayQueueSection *section;
+
+	lock_section_queue (queue);
+
+	if (queue->first) {
+		section = queue->first;
+		queue->first = section->next;
+
+		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);
+
+		section->next = NULL;
+	} else {
+		section = NULL;
+	}
+
+	unlock_section_queue (queue);
+
+	return section;
+}
+
+/*
+ * Push a floating section onto the front of the queue; with enqueue
+ * checking enabled, every object in the section is validated.
+ * NOTE(review): the state transition happens before the lock is taken —
+ * confirm the same section is never enqueued concurrently from two threads.
+ */
+void
+sgen_section_gray_queue_enqueue (SgenSectionGrayQueue *queue, GrayQueueSection *section)
+{
+	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
+
+	lock_section_queue (queue);
+
+	section->next = queue->first;
+	queue->first = section;
+#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
+	if (queue->enqueue_check_func) {
+		int i;
+		for (i = 0; i < section->end; ++i)
+			queue->enqueue_check_func (section->objects [i]);
+	}
+#endif
+
+	unlock_section_queue (queue);
+}
+
#endif
/*
+ * sgen-gray.h: Gray queue management.
+ *
* Copyright 2011 Xamarin Inc (http://www.xamarin.com)
+ * Copyright (C) 2012 Xamarin Inc
*
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
*
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __MONO_SGEN_GRAY_H__
#define __MONO_SGEN_GRAY_H__
#define SGEN_GRAY_QUEUE_SECTION_SIZE (128 - 3)
+#ifdef SGEN_CHECK_GRAY_OBJECT_SECTIONS
+/* Debug-only life-cycle states for a gray queue section. */
+typedef enum {
+	GRAY_QUEUE_SECTION_STATE_FLOATING,	/* not linked into any queue */
+	GRAY_QUEUE_SECTION_STATE_ENQUEUED,	/* on a queue's section list */
+	GRAY_QUEUE_SECTION_STATE_FREE_LIST,	/* on a queue's free list */
+	GRAY_QUEUE_SECTION_STATE_FREED		/* returned to the internal allocator */
+} GrayQueueSectionState;
+#endif
+
/*
* This is a stack now instead of a queue, so the most recently added items are removed
* first, improving cache locality, and keeping the stack size manageable.
*/
typedef struct _GrayQueueSection GrayQueueSection;
struct _GrayQueueSection {
+#ifdef SGEN_CHECK_GRAY_OBJECT_SECTIONS
+ /*
+ * The dummy is here so that the state doesn't get overwritten
+ * by the internal allocator once the section is freed.
+ */
+ int dummy;
+ GrayQueueSectionState state;
+#endif
int end;
GrayQueueSection *next;
char *objects [SGEN_GRAY_QUEUE_SECTION_SIZE];
typedef struct _SgenGrayQueue SgenGrayQueue;
typedef void (*GrayQueueAllocPrepareFunc) (SgenGrayQueue*);
+typedef void (*GrayQueueEnqueueCheckFunc) (char*);
struct _SgenGrayQueue {
+	/* Stack of sections; 'first' is the section objects are pushed to/popped from. */
	GrayQueueSection *first;
	GrayQueueSection *free_list;
-	int balance;
+	/* Called before allocating a new section; may be NULL. */
	GrayQueueAllocPrepareFunc alloc_prepare_func;
+#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
+	/* Debug hook validating each enqueued object; may be NULL. */
+	GrayQueueEnqueueCheckFunc enqueue_check_func;
+#endif
	void *alloc_prepare_data;
};
+typedef struct _SgenSectionGrayQueue SgenSectionGrayQueue;
+
+/* A queue of whole gray sections, optionally guarded by a recursive mutex. */
+struct _SgenSectionGrayQueue {
+	GrayQueueSection *first;
+	/* TRUE if enqueue/dequeue must take 'lock'. */
+	gboolean locked;
+	mono_mutex_t lock;
+#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
+	/* Debug hook validating each enqueued object; may be NULL. */
+	GrayQueueEnqueueCheckFunc enqueue_check_func;
+#endif
+};
+
void sgen_gray_object_enqueue (SgenGrayQueue *queue, char *obj) MONO_INTERNAL;
char* sgen_gray_object_dequeue (SgenGrayQueue *queue) MONO_INTERNAL;
GrayQueueSection* sgen_gray_object_dequeue_section (SgenGrayQueue *queue) MONO_INTERNAL;
void sgen_gray_object_enqueue_section (SgenGrayQueue *queue, GrayQueueSection *section) MONO_INTERNAL;
-void sgen_gray_object_queue_init (SgenGrayQueue *queue) MONO_INTERNAL;
-void sgen_gray_object_queue_init_with_alloc_prepare (SgenGrayQueue *queue, GrayQueueAllocPrepareFunc func, void *data) MONO_INTERNAL;
+void sgen_gray_object_queue_init (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func) MONO_INTERNAL;
+void sgen_gray_object_queue_init_invalid (SgenGrayQueue *queue) MONO_INTERNAL;
+void sgen_gray_object_queue_init_with_alloc_prepare (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func,
+ GrayQueueAllocPrepareFunc func, void *data) MONO_INTERNAL;
+void sgen_gray_object_queue_deinit (SgenGrayQueue *queue) MONO_INTERNAL;
+void sgen_gray_object_queue_disable_alloc_prepare (SgenGrayQueue *queue) MONO_INTERNAL;
void sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue) MONO_INTERNAL;
void sgen_gray_object_free_queue_section (GrayQueueSection *section) MONO_INTERNAL;
-gboolean sgen_drain_gray_stack (SgenGrayQueue *queue, int max_objs) MONO_INTERNAL;
+
+void sgen_section_gray_queue_init (SgenSectionGrayQueue *queue, gboolean locked,
+ GrayQueueEnqueueCheckFunc enqueue_check_func) MONO_INTERNAL;
+gboolean sgen_section_gray_queue_is_empty (SgenSectionGrayQueue *queue) MONO_INTERNAL;
+GrayQueueSection* sgen_section_gray_queue_dequeue (SgenSectionGrayQueue *queue) MONO_INTERNAL;
+void sgen_section_gray_queue_enqueue (SgenSectionGrayQueue *queue, GrayQueueSection *section) MONO_INTERNAL;
static inline gboolean
sgen_gray_object_queue_is_empty (SgenGrayQueue *queue)
/*
- * sgen-internal.c
+ * sgen-internal.c: Internal lock-free memory allocator.
*
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "config.h"
case INTERNAL_MEM_MS_BLOCK_INFO: return "marksweep-block-info";
case INTERNAL_MEM_EPHEMERON_LINK: return "ephemeron-link";
case INTERNAL_MEM_WORKER_DATA: return "worker-data";
+ case INTERNAL_MEM_WORKER_JOB_DATA: return "worker-job-data";
case INTERNAL_MEM_BRIDGE_DATA: return "bridge-data";
case INTERNAL_MEM_BRIDGE_HASH_TABLE: return "bridge-hash-table";
case INTERNAL_MEM_BRIDGE_HASH_TABLE_ENTRY: return "bridge-hash-table-entry";
case INTERNAL_MEM_BRIDGE_ALIVE_HASH_TABLE_ENTRY: return "bridge-alive-hash-table-entry";
case INTERNAL_MEM_JOB_QUEUE_ENTRY: return "job-queue-entry";
case INTERNAL_MEM_TOGGLEREF_DATA: return "toggleref-data";
+ case INTERNAL_MEM_CARDTABLE_MOD_UNION: return "cardtable-mod-union";
default:
g_assert_not_reached ();
}
if (size > allocator_sizes [NUM_ALLOCATORS - 1]) {
p = sgen_alloc_os_memory (size, SGEN_ALLOC_INTERNAL | SGEN_ALLOC_ACTIVATE, NULL);
if (!p)
- sgen_assert_memory_alloc (NULL, description_for_type (type));
+ sgen_assert_memory_alloc (NULL, size, description_for_type (type));
return p;
}
p = mono_lock_free_alloc (&allocators [index]);
if (!p)
- sgen_assert_memory_alloc (NULL, description_for_type (type));
+ sgen_assert_memory_alloc (NULL, size, description_for_type (type));
memset (p, 0, size);
return p;
}
void **vtslot;
g_assert (size > SGEN_MAX_SMALL_OBJ_SIZE);
+ g_assert ((size & 1) == 0);
#ifdef LOS_DUMMY
if (!los_segment)
alloc_size &= ~(pagesize - 1);
if (sgen_memgov_try_alloc_space (alloc_size, SPACE_LOS)) {
obj = sgen_alloc_os_memory (alloc_size, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, NULL);
- if (obj)
- obj->huge_object = TRUE;
}
} else {
obj = get_los_section_memory (size + sizeof (LOSObject));
los_consistency_check ();
#endif
+ if (sgen_remember_major_object_for_concurrent_mark (obj->data))
+ sgen_los_pin_object (obj->data);
+
return obj->data;
}
#ifdef SGEN_HAVE_CARDTABLE
+/*
+ * Scan the card table (or, for the concurrent collector, the per-object
+ * mod-union cards) of every live LOS object.
+ */
void
-sgen_los_scan_card_table (SgenGrayQueue *queue)
+sgen_los_scan_card_table (gboolean mod_union, SgenGrayQueue *queue)
{
	LOSObject *obj;
	for (obj = los_object_list; obj; obj = obj->next) {
-		sgen_cardtable_scan_object (obj->data, obj->size, NULL, queue);
+		guint8 *cards = NULL;
+		if (mod_union) {
+			/* The concurrent collector must have recorded a mod union for every LOS object. */
+			cards = obj->cardtable_mod_union;
+			g_assert (cards);
+		}
+
+		/* Mask out the pin/mark bit stored in the low bit of size. */
+		sgen_cardtable_scan_object (obj->data, sgen_los_object_size (obj), cards, queue);
+	}
+}
+
+/*
+ * Fold the current card-table state of every LOS object into its mod-union
+ * card buffer, allocating the buffer on first use.
+ */
+void
+sgen_los_update_cardtable_mod_union (void)
+{
+	LOSObject *obj;
+
+	for (obj = los_object_list; obj; obj = obj->next) {
+		guint8 *start_card = sgen_card_table_get_card_scan_address ((mword)obj->data);
+		/* Use the size with the pin/mark bit masked out so the last card is computed correctly. */
+		guint8 *end_card = sgen_card_table_get_card_scan_address ((mword)obj->data + sgen_los_object_size (obj) - 1) + 1;
+		size_t num_cards = end_card - start_card;
+
+		if (!obj->cardtable_mod_union) {
+			obj->cardtable_mod_union = sgen_alloc_internal_dynamic (num_cards,
+					INTERNAL_MEM_CARDTABLE_MOD_UNION, TRUE);
+			memcpy (obj->cardtable_mod_union, start_card, num_cards);
+		} else {
+			/* size_t, not int, to match num_cards and avoid signed/unsigned comparison. */
+			size_t i;
+			for (i = 0; i < num_cards; ++i)
+				obj->cardtable_mod_union [i] |= start_card [i];
+		}
+	}
+}
#endif
+/* Object size with the pin/mark bit (lowest bit of 'size') masked out. */
+mword
+sgen_los_object_size (LOSObject *obj)
+{
+	return obj->size & ~1L;
+}
+
+/*
+ * Map an object's data pointer back to its LOSObject header.  Compute the
+ * offset of the 'data' field directly instead of summing individual field
+ * sizes: LOSObject now also contains cardtable_mod_union (and, on 32-bit,
+ * a padding word), which "sizeof (LOSObject*) + sizeof (mword)" missed.
+ */
+LOSObject*
+sgen_los_header_for_object (char *data)
+{
+	return (LOSObject*)(data - G_STRUCT_OFFSET (LOSObject, data));
+}
+
+/* Pin a LOS object by setting the low bit of its header's size word. */
+void
+sgen_los_pin_object (char *data)
+{
+	LOSObject *obj = sgen_los_header_for_object (data);
+	obj->size = obj->size | 1;
+	binary_protocol_pin (data, (gpointer)SGEN_LOAD_VTABLE (data), sgen_safe_object_get_size ((MonoObject*)data));
+}
+
+/* Clear the pin bit by rewriting 'size' with the masked value. */
+void
+sgen_los_unpin_object (char *data)
+{
+	LOSObject *obj = sgen_los_header_for_object (data);
+	obj->size = sgen_los_object_size (obj);
+}
+
+/* TRUE iff the object's pin bit (low bit of the header's size word) is set. */
+gboolean
+sgen_los_object_is_pinned (char *data)
+{
+	LOSObject *obj = sgen_los_header_for_object (data);
+	return obj->size & 1;
+}
+
#endif /* HAVE_SGEN_GC */
}
static void*
-major_alloc_object (int size, gboolean has_references)
+major_alloc_object (MonoVTable *vtable, int size, gboolean has_references)
{
char *dest = to_space_bumper;
/* Make sure we have enough space available */
to_space_bumper += size;
SGEN_ASSERT (8, to_space_bumper <= to_space_top, "to-space-bumper %p overflow to-space-top %p", to_space_bumper, to_space_top);
to_space_section->scan_starts [(dest - (char*)to_space_section->data)/SGEN_SCAN_START_SIZE] = dest;
+ /* FIXME: write vtable */
+ g_assert_not_reached ();
return dest;
}
/* size is a multiple of ALLOC_ALIGN */
static void*
-major_alloc_small_pinned_obj (size_t size, gboolean has_references)
+major_alloc_small_pinned_obj (MonoVTable *vtable, size_t size, gboolean has_references)
{
+ /* FIXME: write vtable */
+ g_assert_not_reached ();
return sgen_alloc_pinned (&pinned_allocator, size);
}
major_pin_objects (SgenGrayQueue *queue)
{
GCMemSection *section;
+ ScanCopyContext ctx = { NULL, NULL, queue };
for (section = section_list; section; section = section->block.next)
- sgen_pin_objects_in_section (section, queue);
+ sgen_pin_objects_in_section (section, ctx);
}
static void
#define PREFETCH_DYNAMIC_HEAP(addr) PREFETCH ((addr))
#endif
+#ifdef SGEN_CONCURRENT_MARK
+#define FOLLOW_OBJECT(addr) (!sgen_ptr_in_nursery ((addr)))
+#else
+#define FOLLOW_OBJECT(addr) 1
+#endif
+
#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj) do { \
void *__old = *(ptr); \
void *__copy; \
- if (__old) { \
+ if (__old && FOLLOW_OBJECT (__old)) { \
PREFETCH_DYNAMIC_HEAP (__old); \
major_copy_or_mark_object ((ptr), queue); \
__copy = *(ptr); \
HEAVY_STAT (++stat_scan_object_called_major);
}
+
+#ifdef SGEN_CONCURRENT_MARK
+#ifdef SGEN_PARALLEL_MARK
+#error concurrent and parallel mark not supported yet
+#else
+/*
+ * Scan a valuetype's references in place.  The scan loop itself comes from
+ * the included sgen-scan-object.h template (with SCAN_OBJECT_NOVTABLE, so
+ * no vtable is read from the scanned memory).
+ */
+static void
+major_scan_vtype (char *start, mword desc, SgenGrayQueue *queue)
+{
+	/* The descriptors include info about the MonoObject header as well */
+	start -= sizeof (MonoObject);
+
+#define SCAN_OBJECT_NOVTABLE
+#include "sgen-scan-object.h"
+}
+#endif
+#endif
--- /dev/null
+#define SGEN_CONCURRENT_MARK
+
+#include "sgen-marksweep.c"
void **free_list;
MSBlockInfo *next_free;
void **pin_queue_start;
+#ifdef SGEN_CONCURRENT_MARK
+ guint8 *cardtable_mod_union;
+#endif
mword mark_words [MS_NUM_MARK_WORDS];
};
static gboolean *evacuate_block_obj_sizes;
static float evacuation_threshold = 0.666;
-static gboolean concurrent_sweep = FALSE;
static gboolean lazy_sweep = TRUE;
static gboolean have_swept;
static long long stat_major_blocks_freed = 0;
static long long stat_major_blocks_lazy_swept = 0;
static long long stat_major_objects_evacuated = 0;
-static long long stat_time_wait_for_sweep = 0;
-
-static gboolean ms_sweep_in_progress = FALSE;
-static MonoNativeThreadId ms_sweep_thread;
-static MonoSemType ms_sweep_cmd_semaphore;
-static MonoSemType ms_sweep_done_semaphore;
-
-static void
-sweep_block (MSBlockInfo *block);
-
-static void
-ms_signal_sweep_command (void)
-{
- if (!concurrent_sweep)
- return;
-
- g_assert (!ms_sweep_in_progress);
- ms_sweep_in_progress = TRUE;
- MONO_SEM_POST (&ms_sweep_cmd_semaphore);
-}
-static void
-ms_signal_sweep_done (void)
-{
- if (!concurrent_sweep)
- return;
+static long long num_major_objects_marked = 0;
- MONO_SEM_POST (&ms_sweep_done_semaphore);
-}
+#ifdef SGEN_COUNT_NUMBER_OF_MAJOR_OBJECTS_MARKED
+#define INC_NUM_MAJOR_OBJECTS_MARKED() (++num_major_objects_marked)
+#else
+#define INC_NUM_MAJOR_OBJECTS_MARKED()
+#endif
static void
-ms_wait_for_sweep_done (void)
-{
- SGEN_TV_DECLARE (atv);
- SGEN_TV_DECLARE (btv);
- int result;
-
- if (!concurrent_sweep)
- return;
-
- if (!ms_sweep_in_progress)
- return;
-
- SGEN_TV_GETTIME (atv);
- while ((result = MONO_SEM_WAIT (&ms_sweep_done_semaphore)) != 0) {
- if (errno != EINTR)
- g_error ("MONO_SEM_WAIT");
- }
- SGEN_TV_GETTIME (btv);
- stat_time_wait_for_sweep += SGEN_TV_ELAPSED (atv, btv);
-
- g_assert (ms_sweep_in_progress);
- ms_sweep_in_progress = FALSE;
-}
+sweep_block (MSBlockInfo *block);
static int
ms_find_block_obj_size_index (int size)
header = (MSBlockHeader*) info->block;
header->info = info;
#endif
+#ifdef SGEN_CONCURRENT_MARK
+ info->cardtable_mod_union = NULL;
+#endif
update_heap_boundaries_for_block (info);
}
static void*
-alloc_obj_par (int size, gboolean pinned, gboolean has_references)
+alloc_obj_par (MonoVTable *vtable, int size, gboolean pinned, gboolean has_references)
{
int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
MSBlockInfo **free_blocks_local = FREE_BLOCKS_LOCAL (pinned, has_references);
MSBlockInfo *block;
void *obj;
- SGEN_ASSERT (9, !ms_sweep_in_progress, "concurrent sweep in progress with concurrent allocation");
SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "old gen parallel allocator called from a %d collection", current_collection_generation);
if (free_blocks_local [size_index]) {
}
}
- /*
- * FIXME: This should not be necessary because it'll be
- * overwritten by the vtable immediately.
- */
- *(void**)obj = NULL;
+ *(MonoVTable**)obj = vtable;
+
+#ifdef SGEN_CONCURRENT_MARK
+ g_assert_not_reached ();
+#endif
return obj;
}
static void*
-major_par_alloc_object (int size, gboolean has_references)
+major_par_alloc_object (MonoVTable *vtable, int size, gboolean has_references)
{
- return alloc_obj_par (size, FALSE, has_references);
+ return alloc_obj_par (vtable, size, FALSE, has_references);
}
#endif
static void*
-alloc_obj (int size, gboolean pinned, gboolean has_references)
+alloc_obj (MonoVTable *vtable, int size, gboolean pinned, gboolean has_references)
{
int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
#endif
- SGEN_ASSERT (9, !ms_sweep_in_progress, "concurrent sweep in progress with concurrent allocation");
-
if (!free_blocks [size_index]) {
if (G_UNLIKELY (!ms_alloc_block (size_index, pinned, has_references)))
return NULL;
obj = unlink_slot_from_free_list_uncontested (free_blocks, size_index);
- /*
- * FIXME: This should not be necessary because it'll be
- * overwritten by the vtable immediately.
- */
- *(void**)obj = NULL;
+ *(MonoVTable**)obj = vtable;
+
+#ifdef SGEN_CONCURRENT_MARK
+ if (obj && sgen_remember_major_object_for_concurrent_mark (obj)) {
+ MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
+ int word, bit;
+ MS_CALC_MARK_BIT (word, bit, obj);
+ MS_SET_MARK_BIT (block, word, bit);
+ binary_protocol_mark (obj, NULL, size);
+ INC_NUM_MAJOR_OBJECTS_MARKED ();
+ }
+#endif
return obj;
}
static void*
-major_alloc_object (int size, gboolean has_references)
+major_alloc_object (MonoVTable *vtable, int size, gboolean has_references)
{
- return alloc_obj (size, FALSE, has_references);
+ return alloc_obj (vtable, size, FALSE, has_references);
}
/*
/* size is a multiple of SGEN_ALLOC_ALIGN */
static void*
-major_alloc_small_pinned_obj (size_t size, gboolean has_references)
+major_alloc_small_pinned_obj (MonoVTable *vtable, size_t size, gboolean has_references)
{
void *res;
- ms_wait_for_sweep_done ();
-
- res = alloc_obj (size, TRUE, has_references);
+ res = alloc_obj (vtable, size, TRUE, has_references);
/*If we failed to alloc memory, we better try releasing memory
*as pinned alloc is requested by the runtime.
*/
if (!res) {
- sgen_perform_collection (0, GENERATION_OLD, "pinned alloc failure");
- res = alloc_obj (size, TRUE, has_references);
+ sgen_perform_collection (0, GENERATION_OLD, "pinned alloc failure", TRUE);
+ res = alloc_obj (vtable, size, TRUE, has_references);
}
return res;
}
void *obj;
int old_num_sections;
- ms_wait_for_sweep_done ();
-
old_num_sections = num_major_sections;
- obj = alloc_obj (size, FALSE, SGEN_VTABLE_HAS_REFERENCES (vtable));
+ obj = alloc_obj (vtable, size, FALSE, SGEN_VTABLE_HAS_REFERENCES (vtable));
if (G_LIKELY (obj)) {
- *(MonoVTable**)obj = vtable;
HEAVY_STAT (++stat_objects_alloced_degraded);
HEAVY_STAT (stat_bytes_alloced_degraded += size);
g_assert (num_major_sections >= old_num_sections);
{
MSBlockInfo *block;
- ms_wait_for_sweep_done ();
-
FOREACH_BLOCK (block) {
int count = MS_BLOCK_FREE / block->obj_size;
int i;
{
MSBlockInfo *block;
- ms_wait_for_sweep_done ();
FOREACH_BLOCK (block) {
int idx;
char *obj;
if ((block)->has_references) \
GRAY_OBJECT_ENQUEUE ((queue), (obj)); \
binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
+ INC_NUM_MAJOR_OBJECTS_MARKED (); \
} \
} while (0)
#define MS_MARK_OBJECT_AND_ENQUEUE(obj,block,queue) do { \
if ((block)->has_references) \
GRAY_OBJECT_ENQUEUE ((queue), (obj)); \
binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
+ INC_NUM_MAJOR_OBJECTS_MARKED (); \
} \
} while (0)
#define MS_PAR_MARK_OBJECT_AND_ENQUEUE(obj,block,queue) do { \
if ((block)->has_references) \
GRAY_OBJECT_ENQUEUE ((queue), (obj)); \
binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((MonoObject*)(obj))); \
+ INC_NUM_MAJOR_OBJECTS_MARKED (); \
} \
} while (0)
static void
pin_major_object (char *obj, SgenGrayQueue *queue)
{
+#ifdef SGEN_CONCURRENT_MARK
+ g_assert_not_reached ();
+#else
MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
block->has_pinned = TRUE;
MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
+#endif
}
#include "sgen-major-copy-object.h"
objsize = SGEN_ALIGN_UP (sgen_par_object_get_size (vt, (MonoObject*)obj));
has_references = SGEN_VTABLE_HAS_REFERENCES (vt);
- destination = sgen_minor_collector.par_alloc_for_promotion (obj, objsize, has_references);
+ destination = sgen_minor_collector.par_alloc_for_promotion (vt, obj, objsize, has_references);
if (G_UNLIKELY (!destination)) {
if (!sgen_ptr_in_nursery (obj)) {
int size_index;
return;
}
- /*
- * We do this before the CAS because we want to make
- * sure that if another thread sees the destination
- * pointer the VTable is already in place. Not doing
- * this can crash binary protocols.
- */
- *(MonoVTable**)destination = vt;
-
if (SGEN_CAS_PTR (obj, (void*)((mword)destination | SGEN_FORWARDED_BIT), vt) == vt) {
gboolean was_marked;
MS_PAR_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
} else {
-#ifdef FIXED_HEAP
- mword vtable_word = *(mword*)obj;
- vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
-#endif
+ LOSObject *bigobj = sgen_los_header_for_object (obj);
+ mword size_word = bigobj->size;
- if (vtable_word & SGEN_PINNED_BIT)
+ if (size_word & 1)
return;
binary_protocol_pin (obj, vt, sgen_safe_object_get_size ((MonoObject*)obj));
- if (SGEN_CAS_PTR (obj, (void*)(vtable_word | SGEN_PINNED_BIT), (void*)vtable_word) == (void*)vtable_word) {
+ if (SGEN_CAS_PTR ((void*)&bigobj->size, (void*)(size_word | 1), (void*)size_word) == (void*)size_word) {
if (SGEN_VTABLE_HAS_REFERENCES (vt))
GRAY_OBJECT_ENQUEUE (queue, obj);
} else {
- g_assert (SGEN_OBJECT_IS_PINNED (obj));
+ g_assert (sgen_los_object_is_pinned (obj));
}
}
}
}
#else
+#ifdef SGEN_CONCURRENT_MARK
+static void
+major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
+{
+ void *obj = *ptr;
+
+ g_assert (!SGEN_OBJECT_IS_FORWARDED (obj));
+
+ if (!sgen_ptr_in_nursery (obj)) {
+#ifdef FIXED_HEAP
+ if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
+#else
+ mword objsize;
+
+ objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));
+
+ if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)
+#endif
+ {
+ MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
+ MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
+ } else {
+ if (sgen_los_object_is_pinned (obj))
+ return;
+ if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
+ MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
+ MONO_GC_OBJ_PINNED ((mword)obj, sgen_safe_object_get_size (obj), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
+ }
+ sgen_los_pin_object (obj);
+ /* FIXME: only enqueue if object has references */
+ GRAY_OBJECT_ENQUEUE (queue, obj);
+ INC_NUM_MAJOR_OBJECTS_MARKED ();
+ }
+ }
+}
+#else
static void
major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
{
MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
}
} else {
- if (SGEN_OBJECT_IS_PINNED (obj))
+ if (sgen_los_object_is_pinned (obj))
return;
binary_protocol_pin (obj, (gpointer)SGEN_LOAD_VTABLE (obj), sgen_safe_object_get_size ((MonoObject*)obj));
if (G_UNLIKELY (MONO_GC_OBJ_PINNED_ENABLED ())) {
MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
MONO_GC_OBJ_PINNED ((mword)obj, sgen_safe_object_get_size (obj), vt->klass->name_space, vt->klass->name, GENERATION_OLD);
}
- SGEN_PIN_OBJECT (obj);
+ sgen_los_pin_object (obj);
/* FIXME: only enqueue if object has references */
GRAY_OBJECT_ENQUEUE (queue, obj);
}
}
}
#endif
+#endif
+
+#ifdef SGEN_CONCURRENT_MARK
+static long long
+major_get_and_reset_num_major_objects_marked (void)
+{
+ long long num = num_major_objects_marked;
+ num_major_objects_marked = 0;
+ return num;
+}
+#endif
#include "sgen-major-scan-object.h"
count = MS_BLOCK_FREE / block->obj_size;
+#ifdef SGEN_CONCURRENT_MARK
+ if (block->cardtable_mod_union) {
+ sgen_free_internal_dynamic (block->cardtable_mod_union, CARDS_PER_BLOCK, INTERNAL_MEM_CARDTABLE_MOD_UNION);
+ block->cardtable_mod_union = NULL;
+ }
+#endif
+
/* Count marked objects in the block */
for (i = 0; i < MS_NUM_MARK_WORDS; ++i) {
nused += bitcount (block->mark_words [i]);
have_swept = TRUE;
}
-static mono_native_thread_return_t
-ms_sweep_thread_func (void *dummy)
-{
- g_assert (concurrent_sweep);
-
- for (;;) {
- int result;
-
- while ((result = MONO_SEM_WAIT (&ms_sweep_cmd_semaphore)) != 0) {
- if (errno != EINTR)
- g_error ("MONO_SEM_WAIT FAILED with %d errno %d (%s)", result, errno, strerror (errno));
- }
-
- ms_sweep ();
-
- ms_signal_sweep_done ();
- }
-
- return NULL;
-}
-
static void
major_sweep (void)
{
- if (concurrent_sweep) {
- g_assert (ms_sweep_thread);
- ms_signal_sweep_command ();
- } else {
- ms_sweep ();
- }
+ ms_sweep ();
}
static int count_pinned_ref;
static void
major_start_nursery_collection (void)
{
- ms_wait_for_sweep_done ();
-
#ifdef MARKSWEEP_CONSISTENCY_CHECK
consistency_check ();
#endif
{
int i;
- ms_wait_for_sweep_done ();
-
/* clear the free lists */
for (i = 0; i < num_block_obj_sizes; ++i) {
if (!evacuate_block_obj_sizes [i])
if (lazy_sweep) {
MSBlockInfo **iter;
+ MONO_GC_SWEEP_BEGIN (GENERATION_OLD, TRUE);
+
iter = &all_blocks;
while (*iter) {
MSBlockInfo *block = *iter;
iter = &block->next;
}
+
+ MONO_GC_SWEEP_END (GENERATION_OLD, TRUE);
}
}
int section_reserve = sgen_get_minor_collection_allowance () / MS_BLOCK_SIZE;
g_assert (have_swept);
- ms_wait_for_sweep_done ();
- g_assert (!ms_sweep_in_progress);
/*
* FIXME: We don't free blocks on 32 bit platforms because it
}
evacuation_threshold = (float)percentage / 100.0;
return TRUE;
- } else if (!strcmp (opt, "concurrent-sweep")) {
- concurrent_sweep = TRUE;
+ } else if (!strcmp (opt, "lazy-sweep")) {
+ lazy_sweep = TRUE;
return TRUE;
- } else if (!strcmp (opt, "no-concurrent-sweep")) {
- concurrent_sweep = FALSE;
+ } else if (!strcmp (opt, "no-lazy-sweep")) {
+ lazy_sweep = FALSE;
return TRUE;
}
" major-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n"
#endif
" evacuation-threshold=P (where P is a percentage, an integer in 0-100)\n"
- " (no-)concurrent-sweep\n"
+ " (no-)lazy-sweep\n"
);
}
#define MS_OBJ_ALLOCED_FAST(o,b) (*(void**)(o) && (*(char**)(o) < (b) || *(char**)(o) >= (b) + MS_BLOCK_SIZE))
static void
-major_scan_card_table (SgenGrayQueue *queue)
+major_scan_card_table (gboolean mod_union, SgenGrayQueue *queue)
{
MSBlockInfo *block;
ScanObjectFunc scan_func = sgen_get_current_object_ops ()->scan_object;
#endif
char *obj, *end, *base;
+ if (mod_union) {
+#ifdef SGEN_CONCURRENT_MARK
+ cards = block->cardtable_mod_union;
+ /*
+ * This happens when the nursery
+ * collection that precedes finishing
+ * the concurrent collection allocates
+ * new major blocks.
+ */
+ if (!cards)
+ continue;
+#else
+ g_assert_not_reached ();
+#endif
+ } else {
/*We can avoid the extra copy since the remark cardtable was cleaned before */
#ifdef SGEN_HAVE_OVERLAPPING_CARDS
- cards = sgen_card_table_get_card_scan_address ((mword)block_start);
+ cards = sgen_card_table_get_card_scan_address ((mword)block_start);
#else
- cards = cards_data;
- if (!sgen_card_table_get_card_data (cards_data, (mword)block_start, CARDS_PER_BLOCK))
- continue;
+ cards = cards_data;
+ if (!sgen_card_table_get_card_data (cards_data, (mword)block_start, CARDS_PER_BLOCK))
+ continue;
#endif
-
- if (!block->swept)
- sweep_block (block);
+ }
obj = (char*)MS_BLOCK_OBJ_FAST (block_start, block_obj_size, 0);
end = block_start + MS_BLOCK_SIZE;
base = sgen_card_table_align_pointer (obj);
while (obj < end) {
- if (MS_OBJ_ALLOCED_FAST (obj, block_start)) {
- int card_offset = (obj - base) >> CARD_BITS;
- sgen_cardtable_scan_object (obj, block_obj_size, cards + card_offset, queue);
+ int card_offset;
+
+ if (!block->swept)
+ sweep_block (block);
+
+ if (!MS_OBJ_ALLOCED_FAST (obj, block_start))
+ goto next_large;
+
+ if (mod_union) {
+ /* FIXME: do this more efficiently */
+ int w, b;
+ MS_CALC_MARK_BIT (w, b, obj);
+ if (!MS_MARK_BIT (block, w, b))
+ goto next_large;
}
+
+ card_offset = (obj - base) >> CARD_BITS;
+ sgen_cardtable_scan_object (obj, block_obj_size, cards + card_offset, queue);
+
+ next_large:
obj += block_obj_size;
}
} else {
* Cards aliasing happens in powers of two, so as long as major blocks are aligned to their
* sizes, they won't overflow the cardtable overlap modulus.
*/
- card_data = card_base = sgen_card_table_get_card_scan_address ((mword)block_start);
+ if (mod_union) {
+#ifdef SGEN_CONCURRENT_MARK
+ card_data = card_base = block->cardtable_mod_union;
+ /*
+ * This happens when the nursery
+ * collection that precedes finishing
+ * the concurrent collection allocates
+ * new major blocks.
+ */
+ if (!card_data)
+ continue;
+#else
+ g_assert_not_reached ();
+#endif
+ } else {
+ card_data = card_base = sgen_card_table_get_card_scan_address ((mword)block_start);
+ }
card_data_end = card_data + CARDS_PER_BLOCK;
for (card_data = initial_skip_card (card_data); card_data < card_data_end; ++card_data) { //card_data = skip_card (card_data + 1, card_data_end)) {
obj = (char*)MS_BLOCK_OBJ_FAST (block_start, block_obj_size, index);
while (obj < end) {
- if (MS_OBJ_ALLOCED_FAST (obj, block_start)) {
- HEAVY_STAT (++scanned_objects);
- scan_func (obj, queue);
+ if (!MS_OBJ_ALLOCED_FAST (obj, block_start))
+ goto next_small;
+
+ if (mod_union) {
+ /* FIXME: do this more efficiently */
+ int w, b;
+ MS_CALC_MARK_BIT (w, b, obj);
+ if (!MS_MARK_BIT (block, w, b))
+ goto next_small;
}
+
+ HEAVY_STAT (++scanned_objects);
+ scan_func (obj, queue);
+ next_small:
obj += block_obj_size;
}
HEAVY_STAT (if (*card_data) ++remarked_cards);
}
} END_FOREACH_BLOCK;
}
-#endif
-static gboolean
-major_is_worker_thread (MonoNativeThreadId thread)
+#ifdef SGEN_CONCURRENT_MARK
+static void
+update_cardtable_mod_union (void)
{
- if (concurrent_sweep)
- return thread == ms_sweep_thread;
- else
- return FALSE;
+ MSBlockInfo *block;
+
+ FOREACH_BLOCK (block) {
+ guint8 *cards;
+ gboolean init = FALSE;
+
+ if (!block->cardtable_mod_union) {
+ block->cardtable_mod_union = sgen_alloc_internal_dynamic (CARDS_PER_BLOCK,
+ INTERNAL_MEM_CARDTABLE_MOD_UNION, TRUE);
+ init = TRUE;
+ }
+
+ cards = sgen_card_table_get_card_scan_address ((mword)block->block);
+ if (init) {
+ memcpy (block->cardtable_mod_union, cards, CARDS_PER_BLOCK);
+ } else {
+ int i;
+ for (i = 0; i < CARDS_PER_BLOCK; ++i)
+ block->cardtable_mod_union [i] |= cards [i];
+ }
+ } END_FOREACH_BLOCK;
}
+#endif
+#endif
static void
alloc_free_block_lists (MSBlockInfo ***lists)
#undef pthread_create
static void
-post_param_init (void)
+post_param_init (SgenMajorCollector *collector)
{
- if (concurrent_sweep) {
- if (!mono_native_thread_create (&ms_sweep_thread, ms_sweep_thread_func, NULL)) {
- fprintf (stderr, "Error: Could not create sweep thread.\n");
- exit (1);
- }
- }
+ collector->sweeps_lazily = lazy_sweep;
}
void
+#ifdef SGEN_CONCURRENT_MARK
+sgen_marksweep_conc_init
+#else
#ifdef SGEN_PARALLEL_MARK
#ifdef FIXED_HEAP
sgen_marksweep_fixed_par_init
#else
sgen_marksweep_init
#endif
+#endif
#endif
(SgenMajorCollector *collector)
{
mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed);
mono_counters_register ("# major blocks lazy swept", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_lazy_swept);
mono_counters_register ("# major objects evacuated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_objects_evacuated);
- mono_counters_register ("Wait for sweep time", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &stat_time_wait_for_sweep);
#ifdef SGEN_PARALLEL_MARK
#ifndef HAVE_KW_THREAD
mono_native_tls_alloc (&workers_free_block_lists_key, NULL);
#endif
#endif
- /*
- * FIXME: These are superfluous if concurrent sweep is
- * disabled. We might want to create them lazily.
- */
- MONO_SEM_INIT (&ms_sweep_cmd_semaphore, 0);
- MONO_SEM_INIT (&ms_sweep_done_semaphore, 0);
-
collector->section_size = MAJOR_SECTION_SIZE;
#ifdef SGEN_PARALLEL_MARK
collector->is_parallel = TRUE;
collector->reset_worker_data = major_reset_worker_data;
#else
collector->is_parallel = FALSE;
+#endif
+#ifdef SGEN_CONCURRENT_MARK
+ collector->is_concurrent = TRUE;
+ collector->get_and_reset_num_major_objects_marked = major_get_and_reset_num_major_objects_marked;
+#else
+ collector->is_concurrent = FALSE;
#endif
collector->supports_cardtable = TRUE;
#ifdef SGEN_HAVE_CARDTABLE
collector->scan_card_table = major_scan_card_table;
collector->iterate_live_block_ranges = (void*)(void*) major_iterate_live_block_ranges;
+#ifdef SGEN_CONCURRENT_MARK
+ collector->update_cardtable_mod_union = update_cardtable_mod_union;
+#endif
#endif
collector->init_to_space = major_init_to_space;
collector->sweep = major_sweep;
collector->get_num_major_sections = get_num_major_sections;
collector->handle_gc_param = major_handle_gc_param;
collector->print_gc_param_usage = major_print_gc_param_usage;
- collector->is_worker_thread = major_is_worker_thread;
collector->post_param_init = post_param_init;
collector->is_valid_object = major_is_valid_object;
collector->describe_pointer = major_describe_pointer;
collector->major_ops.copy_or_mark_object = major_copy_or_mark_object;
collector->major_ops.scan_object = major_scan_object;
+#ifdef SGEN_CONCURRENT_MARK
+ collector->major_ops.scan_vtype = major_scan_vtype;
+#endif
#ifdef SGEN_HAVE_CARDTABLE
/*cardtable requires major pages to be 8 cards aligned*/
gboolean
sgen_need_major_collection (mword space_needed)
{
- mword los_alloced = los_memory_usage - MIN (last_collection_los_memory_usage, los_memory_usage);
+ mword los_alloced;
+ if (sgen_concurrent_collection_in_progress ())
+ return FALSE;
+ los_alloced = los_memory_usage - MIN (last_collection_los_memory_usage, los_memory_usage);
return (space_needed > sgen_memgov_available_free_space ()) ||
minor_collection_sections_alloced * major_collector.section_size + los_alloced > minor_collection_allowance;
}
}
void
-sgen_assert_memory_alloc (void *ptr, const char *assert_description)
+sgen_assert_memory_alloc (void *ptr, size_t requested_size, const char *assert_description)
{
if (ptr || !assert_description)
return;
- fprintf (stderr, "Error: Garbage collector could not allocate memory for %s.\n", assert_description);
+ fprintf (stderr, "Error: Garbage collector could not allocate %zu bytes of memory for %s.\n", requested_size, assert_description);
exit (1);
}
g_assert (!(flags & ~(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE)));
ptr = mono_valloc (0, size, prot_flags_for_activate (flags & SGEN_ALLOC_ACTIVATE));
- sgen_assert_memory_alloc (ptr, assert_description);
+ sgen_assert_memory_alloc (ptr, size, assert_description);
if (ptr) {
SGEN_ATOMIC_ADD_P (total_alloc, size);
if (flags & SGEN_ALLOC_HEAP)
g_assert (!(flags & ~(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE)));
ptr = mono_valloc_aligned (size, alignment, prot_flags_for_activate (flags & SGEN_ALLOC_ACTIVATE));
- sgen_assert_memory_alloc (ptr, assert_description);
+ sgen_assert_memory_alloc (ptr, size, assert_description);
if (ptr) {
SGEN_ATOMIC_ADD_P (total_alloc, size);
if (flags & SGEN_ALLOC_HEAP)
void sgen_free_os_memory (void *addr, size_t size, SgenAllocFlags flags) MONO_INTERNAL;
/* Error handling */
-void sgen_assert_memory_alloc (void *ptr, const char *assert_description) MONO_INTERNAL;
+void sgen_assert_memory_alloc (void *ptr, size_t requested_size, const char *assert_description) MONO_INTERNAL;
#endif
objsize = SGEN_ALIGN_UP (sgen_par_object_get_size (vt, (MonoObject*)obj));
has_references = SGEN_VTABLE_HAS_REFERENCES (vt);
- destination = COLLECTOR_PARALLEL_ALLOC_FOR_PROMOTION (obj, objsize, has_references);
+ destination = COLLECTOR_PARALLEL_ALLOC_FOR_PROMOTION (vt, obj, objsize, has_references);
if (G_UNLIKELY (!destination)) {
sgen_parallel_pin_or_update (obj_slot, obj, vt, queue);
}
mword
-sgen_build_nursery_fragments (GCMemSection *nursery_section, void **start, int num_entries)
+sgen_build_nursery_fragments (GCMemSection *nursery_section, void **start, int num_entries, SgenGrayQueue *unpin_queue)
{
char *frag_start, *frag_end;
size_t frag_size;
addr1 = frags_ranges->fragment_start;
if (addr0 < addr1) {
- SGEN_UNPIN_OBJECT (addr0);
+ if (unpin_queue)
+ GRAY_OBJECT_ENQUEUE (unpin_queue, addr0);
+ else
+ SGEN_UNPIN_OBJECT (addr0);
sgen_set_nursery_scan_start (addr0);
frag_end = addr0;
size = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)addr0));
/*
+ * sgen-protocol.c: Binary protocol of internal activity, to aid
+ * debugging.
+ *
* Copyright 2001-2003 Ximian, Inc
* Copyright 2003-2010 Novell, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifdef HAVE_SGEN_GC
}
void
-binary_protocol_collection (int index, int generation)
+binary_protocol_collection_begin (int index, int generation)
{
SGenProtocolCollection entry = { index, generation };
binary_protocol_flush_buffers (FALSE);
- protocol_entry (SGEN_PROTOCOL_COLLECTION, &entry, sizeof (SGenProtocolCollection));
+ protocol_entry (SGEN_PROTOCOL_COLLECTION_BEGIN, &entry, sizeof (SGenProtocolCollection));
+}
+
+void
+binary_protocol_collection_end (int index, int generation)
+{
+ SGenProtocolCollection entry = { index, generation };
+ binary_protocol_flush_buffers (FALSE);
+ protocol_entry (SGEN_PROTOCOL_COLLECTION_END, &entry, sizeof (SGenProtocolCollection));
}
void
/*
+ * sgen-protocol.h: Binary protocol of internal activity, to aid
+ * debugging.
+ *
* Copyright 2001-2003 Ximian, Inc
* Copyright 2003-2010 Novell, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (C) 2012 Xamarin Inc
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "sgen-gc.h"
#ifdef SGEN_BINARY_PROTOCOL
enum {
- SGEN_PROTOCOL_COLLECTION,
+ SGEN_PROTOCOL_COLLECTION_BEGIN,
+ SGEN_PROTOCOL_COLLECTION_END,
SGEN_PROTOCOL_ALLOC,
SGEN_PROTOCOL_COPY,
SGEN_PROTOCOL_PIN,
void binary_protocol_flush_buffers (gboolean force) MONO_INTERNAL;
-void binary_protocol_collection (int index, int generation) MONO_INTERNAL;
+void binary_protocol_collection_begin (int index, int generation) MONO_INTERNAL;
+void binary_protocol_collection_end (int index, int generation) MONO_INTERNAL;
void binary_protocol_alloc (gpointer obj, gpointer vtable, int size) MONO_INTERNAL;
void binary_protocol_alloc_pinned (gpointer obj, gpointer vtable, int size) MONO_INTERNAL;
void binary_protocol_alloc_degraded (gpointer obj, gpointer vtable, int size) MONO_INTERNAL;
#define binary_protocol_is_enabled() FALSE
#define binary_protocol_flush_buffers(force)
-#define binary_protocol_collection(index, generation)
+#define binary_protocol_collection_begin(index, generation)
+#define binary_protocol_collection_end(index, generation)
#define binary_protocol_alloc(obj, vtable, size)
#define binary_protocol_alloc_pinned(obj, vtable, size)
#define binary_protocol_alloc_degraded(obj, vtable, size)
#include "metadata/sgen-protocol.h"
static inline char*
-alloc_for_promotion (char *obj, size_t objsize, gboolean has_references)
+alloc_for_promotion (MonoVTable *vtable, char *obj, size_t objsize, gboolean has_references)
{
- return major_collector.alloc_object (objsize, has_references);
+ return major_collector.alloc_object (vtable, objsize, has_references);
}
static inline char*
-par_alloc_for_promotion (char *obj, size_t objsize, gboolean has_references)
+par_alloc_for_promotion (MonoVTable *vtable, char *obj, size_t objsize, gboolean has_references)
{
- return major_collector.par_alloc_object (objsize, has_references);
+ return major_collector.par_alloc_object (vtable, objsize, has_references);
}
static SgenFragment*
}
static inline char*
-alloc_for_promotion (char *obj, size_t objsize, gboolean has_references)
+alloc_for_promotion (MonoVTable *vtable, char *obj, size_t objsize, gboolean has_references)
{
char *p = NULL;
int age;
age = get_object_age (obj);
if (age >= promote_age)
- return major_collector.alloc_object (objsize, has_references);
+ return major_collector.alloc_object (vtable, objsize, has_references);
/* Promote! */
++age;
} else {
p = alloc_for_promotion_slow_path (age, objsize);
if (!p)
- p = major_collector.alloc_object (objsize, has_references);
+ return major_collector.alloc_object (vtable, objsize, has_references);
}
+ *(MonoVTable**)p = vtable;
+
return p;
}
}
static inline char*
-par_alloc_for_promotion (char *obj, size_t objsize, gboolean has_references)
+par_alloc_for_promotion (MonoVTable *vtable, char *obj, size_t objsize, gboolean has_references)
{
char *p;
int age;
age = get_object_age (obj);
if (age >= promote_age)
- return major_collector.par_alloc_object (objsize, has_references);
+ return major_collector.par_alloc_object (vtable, objsize, has_references);
restart:
p = age_alloc_buffers [age].next;
/* Have we failed to promote to the nursery, lets just evacuate it to old gen. */
if (!p)
- p = major_collector.par_alloc_object (objsize, has_references);
+ return major_collector.par_alloc_object (vtable, objsize, has_references);
}
+ *(MonoVTable**)p = vtable;
+
return p;
}
static char*
-minor_alloc_for_promotion (char *obj, size_t objsize, gboolean has_references)
+minor_alloc_for_promotion (MonoVTable *vtable, char *obj, size_t objsize, gboolean has_references)
{
/*
We only need to check for a non-nursery object if we're doing a major collection.
*/
if (!sgen_ptr_in_nursery (obj))
- return major_collector.alloc_object (objsize, has_references);
+ return major_collector.alloc_object (vtable, objsize, has_references);
- return alloc_for_promotion (obj, objsize, has_references);
+ return alloc_for_promotion (vtable, obj, objsize, has_references);
}
static char*
-minor_par_alloc_for_promotion (char *obj, size_t objsize, gboolean has_references)
+minor_par_alloc_for_promotion (MonoVTable *vtable, char *obj, size_t objsize, gboolean has_references)
{
/*
We only need to check for a non-nursery object if we're doing a major collection.
*/
if (!sgen_ptr_in_nursery (obj))
- return major_collector.par_alloc_object (objsize, has_references);
+ return major_collector.par_alloc_object (vtable, objsize, has_references);
- return par_alloc_for_promotion (obj, objsize, has_references);
+ return par_alloc_for_promotion (vtable, obj, objsize, has_references);
}
static SgenFragment*
sgen_process_togglerefs ();
mono_profiler_gc_event (MONO_GC_EVENT_PRE_STOP_WORLD, generation);
+ MONO_GC_WORLD_STOP_BEGIN ();
acquire_gc_locks ();
update_current_thread_stack (&count);
SGEN_LOG (3, "world stopped %d thread(s)", count);
mono_profiler_gc_event (MONO_GC_EVENT_POST_STOP_WORLD, generation);
+ MONO_GC_WORLD_STOP_END ();
sgen_memgov_collection_start (generation);
if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_GC_MOVES))
sgen_gc_event_moves ();
mono_profiler_gc_event (MONO_GC_EVENT_PRE_START_WORLD, generation);
+ MONO_GC_WORLD_RESTART_BEGIN (generation);
FOREACH_THREAD (info) {
info->stack_start = NULL;
#ifdef USE_MONO_CTX
max_pause_usec = MAX (usec, max_pause_usec);
SGEN_LOG (2, "restarted %d thread(s) (pause time: %d usec, max: %d)", count, (int)usec, (int)max_pause_usec);
mono_profiler_gc_event (MONO_GC_EVENT_POST_START_WORLD, generation);
+ MONO_GC_WORLD_RESTART_END (generation);
bridge_process (generation);
}
void
-sgen_scan_togglerefs (CopyOrMarkObjectFunc copy_func, char *start, char *end, SgenGrayQueue *queue)
+sgen_scan_togglerefs (char *start, char *end, ScanCopyContext ctx)
{
+ CopyOrMarkObjectFunc copy_func = ctx.copy_func;
+ SgenGrayQueue *queue = ctx.queue;
int i;
SGEN_LOG (4, "Scanning ToggleRefs %d", toggleref_array_size);
/*
+ * sgen-workers.c: Worker threads for parallel and concurrent GC.
+ *
* Copyright 2001-2003 Ximian, Inc
* Copyright 2003-2010 Novell, Inc.
+ * Copyright (C) 2012 Xamarin Inc
*
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
*
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "config.h"
static int workers_num;
static WorkerData *workers_data;
-static WorkerData workers_gc_thread_data;
+static void *workers_gc_thread_major_collector_data = NULL;
-static SgenGrayQueue workers_distribute_gray_queue;
+static SgenSectionGrayQueue workers_distribute_gray_queue;
+static gboolean workers_distribute_gray_queue_inited;
static volatile gboolean workers_gc_in_progress = FALSE;
static volatile gboolean workers_marking = FALSE;
static volatile int workers_job_queue_num_entries = 0;
static volatile JobQueueEntry *workers_job_queue = NULL;
static LOCK_DECLARE (workers_job_queue_mutex);
+static int workers_num_jobs_enqueued = 0;
+static volatile int workers_num_jobs_finished = 0;
static long long stat_workers_stolen_from_self_lock;
static long long stat_workers_stolen_from_self_no_lock;
workers_wake_up (workers_num);
}
+void
+sgen_workers_wake_up_all (void)
+{
+ g_assert (workers_gc_in_progress);
+ workers_wake_up_all ();
+}
+
static void
workers_wait (void)
{
MONO_SEM_WAIT (&workers_waiting_sem);
}
+static gboolean
+collection_needs_workers (void)
+{
+ return sgen_collection_is_parallel () || sgen_collection_is_concurrent ();
+}
+
void
sgen_workers_enqueue_job (JobFunc func, void *data)
{
int num_entries;
JobQueueEntry *entry;
- if (!sgen_collection_is_parallel ()) {
+ if (!collection_needs_workers ()) {
func (NULL, data);
return;
}
entry->next = workers_job_queue;
workers_job_queue = entry;
num_entries = ++workers_job_queue_num_entries;
+ ++workers_num_jobs_enqueued;
mono_mutex_unlock (&workers_job_queue_mutex);
workers_wake_up (num_entries);
}
+void
+sgen_workers_wait_for_jobs (void)
+{
+ // FIXME: implement this properly
+ while (workers_num_jobs_finished < workers_num_jobs_enqueued)
+ g_usleep (1000);
+}
+
static gboolean
workers_dequeue_and_do_job (WorkerData *data)
{
JobQueueEntry *entry;
+ int num_finished;
/*
* At this point the GC might not be running anymore. We
if (!entry)
return FALSE;
- g_assert (sgen_collection_is_parallel ());
+ g_assert (collection_needs_workers ());
entry->func (data, entry->data);
sgen_free_internal (entry, INTERNAL_MEM_JOB_QUEUE_ENTRY);
+
+ SGEN_ATOMIC_ADD (workers_num_jobs_finished, 1);
+
return TRUE;
}
static gboolean
workers_get_work (WorkerData *data)
{
+ SgenMajorCollector *major;
int i;
g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));
if (workers_steal (data, data, TRUE))
return TRUE;
- /* Then from the GC thread's stack. */
- if (workers_steal (data, &workers_gc_thread_data, TRUE))
- return TRUE;
-
- /* Finally, from another worker. */
+ /* From another worker. */
for (i = 0; i < workers_num; ++i) {
WorkerData *victim_data = &workers_data [i];
if (data == victim_data)
return TRUE;
}
+ /*
+ * If we're concurrent or parallel, from the workers
+ * distribute gray queue.
+ */
+ major = sgen_get_major_collector ();
+ if (major->is_concurrent || major->is_parallel) {
+ GrayQueueSection *section = sgen_section_gray_queue_dequeue (&workers_distribute_gray_queue);
+ if (section) {
+ sgen_gray_object_enqueue_section (&data->private_gray_queue, section);
+ return TRUE;
+ }
+ }
+
/* Nobody to steal from */
g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));
return FALSE;
sgen_gray_object_free_queue_section (section);
}
- if (data != &workers_gc_thread_data && sgen_gray_object_queue_is_empty (queue))
+ if (sgen_gray_object_queue_is_empty (queue))
workers_steal (data, data, FALSE);
mono_mutex_unlock (&data->stealable_stack_mutex);
workers_wake_up_all ();
}
+static void
+concurrent_enqueue_check (char *obj)
+{
+ g_assert (sgen_concurrent_collection_in_progress ());
+ g_assert (!sgen_ptr_in_nursery (obj));
+ g_assert (SGEN_LOAD_VTABLE (obj));
+}
+
+static void
+init_private_gray_queue (WorkerData *data)
+{
+ sgen_gray_object_queue_init_with_alloc_prepare (&data->private_gray_queue,
+ sgen_get_major_collector ()->is_concurrent ? concurrent_enqueue_check : NULL,
+ workers_gray_queue_share_redirect, data);
+}
+
static mono_native_thread_return_t
workers_thread_func (void *data_untyped)
{
if (sgen_get_major_collector ()->init_worker_thread)
sgen_get_major_collector ()->init_worker_thread (data->major_collector_data);
- sgen_gray_object_queue_init_with_alloc_prepare (&data->private_gray_queue,
- workers_gray_queue_share_redirect, data);
+ init_private_gray_queue (data);
for (;;) {
gboolean did_work = FALSE;
}
if (workers_marking && (!sgen_gray_object_queue_is_empty (&data->private_gray_queue) || workers_get_work (data))) {
+ ScanCopyContext ctx = { sgen_get_major_collector ()->major_ops.scan_object, NULL,
+ &data->private_gray_queue };
+
g_assert (!sgen_gray_object_queue_is_empty (&data->private_gray_queue));
- while (!sgen_drain_gray_stack (&data->private_gray_queue, 32))
+ while (!sgen_drain_gray_stack (32, ctx))
workers_gray_queue_share_redirect (&data->private_gray_queue);
g_assert (sgen_gray_object_queue_is_empty (&data->private_gray_queue));
- sgen_gray_object_queue_init (&data->private_gray_queue);
+ init_private_gray_queue (data);
did_work = TRUE;
}
return NULL;
}
-void
-sgen_workers_distribute_gray_queue_sections (void)
+static void
+init_distribute_gray_queue (gboolean locked)
{
- if (!sgen_collection_is_parallel ())
+ if (workers_distribute_gray_queue_inited) {
+ g_assert (sgen_section_gray_queue_is_empty (&workers_distribute_gray_queue));
+ g_assert (!workers_distribute_gray_queue.locked == !locked);
return;
+ }
- workers_gray_queue_share_redirect (&workers_distribute_gray_queue);
+ sgen_section_gray_queue_init (&workers_distribute_gray_queue, locked,
+ sgen_get_major_collector ()->is_concurrent ? concurrent_enqueue_check : NULL);
+ workers_distribute_gray_queue_inited = TRUE;
}
void
sgen_workers_init_distribute_gray_queue (void)
{
- if (!sgen_collection_is_parallel ())
+ if (!collection_needs_workers ())
return;
- sgen_gray_object_queue_init (&workers_distribute_gray_queue);
+ init_distribute_gray_queue (sgen_get_major_collector ()->is_concurrent || sgen_get_major_collector ()->is_parallel);
}
void
{
int i;
- if (!sgen_get_major_collector ()->is_parallel)
+ if (!sgen_get_major_collector ()->is_parallel && !sgen_get_major_collector ()->is_concurrent)
return;
//g_print ("initing %d workers\n", num_workers);
MONO_SEM_INIT (&workers_waiting_sem, 0);
MONO_SEM_INIT (&workers_done_sem, 0);
- sgen_gray_object_queue_init_with_alloc_prepare (&workers_distribute_gray_queue,
- workers_gray_queue_share_redirect, &workers_gc_thread_data);
- mono_mutex_init (&workers_gc_thread_data.stealable_stack_mutex, NULL);
- workers_gc_thread_data.stealable_stack_fill = 0;
+ init_distribute_gray_queue (sgen_get_major_collector ()->is_concurrent || sgen_get_major_collector ()->is_parallel);
if (sgen_get_major_collector ()->alloc_worker_data)
- workers_gc_thread_data.major_collector_data = sgen_get_major_collector ()->alloc_worker_data ();
+ workers_gc_thread_major_collector_data = sgen_get_major_collector ()->alloc_worker_data ();
for (i = 0; i < workers_num; ++i) {
/* private gray queue is inited by the thread itself */
{
int i;
- if (!sgen_collection_is_parallel ())
+ if (!collection_needs_workers ())
return;
if (sgen_get_major_collector ()->init_worker_thread)
- sgen_get_major_collector ()->init_worker_thread (workers_gc_thread_data.major_collector_data);
+ sgen_get_major_collector ()->init_worker_thread (workers_gc_thread_major_collector_data);
g_assert (!workers_gc_in_progress);
workers_gc_in_progress = TRUE;
workers_marking = FALSE;
workers_done_posted = 0;
+ g_assert (workers_job_queue_num_entries == 0);
+ workers_num_jobs_enqueued = 0;
+ workers_num_jobs_finished = 0;
+
if (workers_started) {
if (workers_num_waiting != workers_num)
g_error ("Expecting all %d sgen workers to be parked, but only %d are", workers_num, workers_num_waiting);
workers_started = TRUE;
}
+gboolean
+sgen_workers_have_started (void)
+{
+ return workers_gc_in_progress;
+}
+
void
sgen_workers_start_marking (void)
{
- if (!sgen_collection_is_parallel ())
+ if (!collection_needs_workers ())
return;
g_assert (workers_started && workers_gc_in_progress);
{
int i;
- if (!sgen_collection_is_parallel ())
+ if (!collection_needs_workers ())
return;
- g_assert (sgen_gray_object_queue_is_empty (&workers_gc_thread_data.private_gray_queue));
- g_assert (sgen_gray_object_queue_is_empty (&workers_distribute_gray_queue));
-
g_assert (workers_gc_in_progress);
workers_gc_in_progress = FALSE;
if (workers_num_waiting == workers_num) {
g_assert (workers_done_posted);
- g_assert (!workers_gc_thread_data.stealable_stack_fill);
- g_assert (sgen_gray_object_queue_is_empty (&workers_gc_thread_data.private_gray_queue));
+ g_assert (sgen_section_gray_queue_is_empty (&workers_distribute_gray_queue));
for (i = 0; i < workers_num; ++i) {
g_assert (!workers_data [i].stealable_stack_fill);
g_assert (sgen_gray_object_queue_is_empty (&workers_data [i].private_gray_queue));
}
}
+gboolean
+sgen_workers_all_done (void)
+{
+ return workers_num_waiting == workers_num;
+}
+
gboolean
sgen_is_worker_thread (MonoNativeThreadId thread)
{
return FALSE;
}
-gboolean
-sgen_workers_is_distributed_queue (SgenGrayQueue *queue)
-{
- return queue == &workers_distribute_gray_queue;
-}
-
-SgenGrayQueue*
-sgen_workers_get_distribute_gray_queue (void)
+SgenSectionGrayQueue*
+sgen_workers_get_distribute_section_gray_queue (void)
{
return &workers_distribute_gray_queue;
}
sgen_workers_reset_data (void)
{
if (sgen_get_major_collector ()->reset_worker_data)
- sgen_get_major_collector ()->reset_worker_data (workers_gc_thread_data.major_collector_data);
-
+ sgen_get_major_collector ()->reset_worker_data (workers_gc_thread_major_collector_data);
}
#endif
/*
+ * sgen-workers.c: Worker threads for parallel and concurrent GC.
+ *
* Copyright 2011 Xamarin Inc (http://www.xamarin.com)
+ * Copyright (C) 2012 Xamarin Inc
*
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
*
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+
#ifndef __MONO_SGEN_WORKER_H__
#define __MONO_SGEN_WORKER_H__
void sgen_workers_init (int num_workers) MONO_INTERNAL;
void sgen_workers_start_all_workers (void) MONO_INTERNAL;
+gboolean sgen_workers_have_started (void) MONO_INTERNAL;
+void sgen_workers_wake_up_all (void) MONO_INTERNAL;
void sgen_workers_init_distribute_gray_queue (void) MONO_INTERNAL;
void sgen_workers_enqueue_job (JobFunc func, void *data) MONO_INTERNAL;
+void sgen_workers_wait_for_jobs (void) MONO_INTERNAL;
void sgen_workers_start_marking (void) MONO_INTERNAL;
void sgen_workers_distribute_gray_queue_sections (void) MONO_INTERNAL;
void sgen_workers_reset_data (void) MONO_INTERNAL;
void sgen_workers_join (void) MONO_INTERNAL;
-gboolean sgen_workers_is_distributed_queue (SgenGrayQueue *queue) MONO_INTERNAL;
-SgenGrayQueue* sgen_workers_get_distribute_gray_queue (void) MONO_INTERNAL;
+gboolean sgen_workers_all_done (void) MONO_INTERNAL;
+SgenSectionGrayQueue* sgen_workers_get_distribute_section_gray_queue (void) MONO_INTERNAL;
#endif
tags:
etags -o TAGS `find .. -name "*.h" -o -name "*.c"`
+
+#if HAS_EXTENSION_MODULE
+#include $(top_srcdir)/../mono-extensions/mono/mini/Makefile.am
+#endif
gboolean direct_pinvoke;
gboolean direct_icalls;
gboolean no_direct_calls;
+ gboolean use_trampolines_page;
int nthreads;
int ntrampolines;
int nrgctx_trampolines;
#endif
}
+/*
+ * arch_emit_specific_trampoline_pages:
+ *
+ * Emits a page full of trampolines: each trampoline uses its own address to
+ * lookup both the generic trampoline code and the data argument.
+ * This page can be remapped in process multiple times so we can get an
+ * unlimited number of trampolines.
+ * Specifically this implementation uses the following trick: two memory pages
+ * are allocated, with the first containing the data and the second containing the trampolines.
+ * To reduce trampoline size, each trampoline jumps at the start of the page where a common
+ * implementation does all the lifting.
+ * Note that the ARM single trampoline size is 8 bytes, exactly like the data that needs to be stored
+ * on the arm 32 bit system.
+ */
+static void
+arch_emit_specific_trampoline_pages (MonoAotCompile *acfg)
+{
+#if defined(TARGET_ARM)
+ guint8 buf [128];
+ guint8 *code;
+ guint8 *loop_start, *loop_branch_back, *loop_end_check, *imt_found_check;
+ int i;
+#define COMMON_TRAMP_SIZE 16
+ int count = (mono_pagesize () - COMMON_TRAMP_SIZE) / 8;
+ int imm8, rot_amount;
+
+ if (!acfg->aot_opts.use_trampolines_page)
+ return;
+
+ emit_alignment (acfg, mono_pagesize ());
+ emit_global (acfg, "specific_trampolines_page", TRUE);
+ emit_label (acfg, "specific_trampolines_page");
+
+ /* emit the generic code first, the trampoline address + 8 is in the lr register */
+ code = buf;
+ imm8 = mono_arm_is_rotated_imm8 (mono_pagesize (), &rot_amount);
+ ARM_SUB_REG_IMM (code, ARMREG_LR, ARMREG_LR, imm8, rot_amount);
+ ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, -8);
+ ARM_LDR_IMM (code, ARMREG_PC, ARMREG_LR, -4);
+ ARM_NOP (code);
+ g_assert (code - buf == COMMON_TRAMP_SIZE);
+
+ /* Emit it */
+ emit_bytes (acfg, buf, code - buf);
+
+ for (i = 0; i < count; ++i) {
+ code = buf;
+ ARM_PUSH (code, 0x5fff);
+ ARM_BL (code, 0);
+ arm_patch (code - 4, code - COMMON_TRAMP_SIZE - 8 * (i + 1));
+ g_assert (code - buf == 8);
+ emit_bytes (acfg, buf, code - buf);
+ }
+
+ /* now the rgctx trampolines: each specific trampoline puts in the ip register
+ * the instruction pointer address, so the generic trampoline at the start of the page
+ * subtracts 4096 to get to the data page and loads the values
+ * We again fit the generic trampoline in 16 bytes.
+ */
+ emit_global (acfg, "rgctx_trampolines_page", TRUE);
+ emit_label (acfg, "rgctx_trampolines_page");
+ code = buf;
+ imm8 = mono_arm_is_rotated_imm8 (mono_pagesize (), &rot_amount);
+ ARM_SUB_REG_IMM (code, ARMREG_IP, ARMREG_IP, imm8, rot_amount);
+ ARM_LDR_IMM (code, MONO_ARCH_RGCTX_REG, ARMREG_IP, -8);
+ ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, -4);
+ ARM_NOP (code);
+ g_assert (code - buf == COMMON_TRAMP_SIZE);
+
+ /* Emit it */
+ emit_bytes (acfg, buf, code - buf);
+
+ for (i = 0; i < count; ++i) {
+ code = buf;
+ ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
+ ARM_B (code, 0);
+ arm_patch (code - 4, code - COMMON_TRAMP_SIZE - 8 * (i + 1));
+ g_assert (code - buf == 8);
+ emit_bytes (acfg, buf, code - buf);
+ }
+ /* now the imt trampolines: each specific trampoline puts in the ip register
+ * the instruction pointer address, so the generic trampoline at the start of the page
+ * subtracts 4096 to get to the data page and loads the values
+ * We again fit the generic trampoline in 16 bytes.
+ */
+#define IMT_TRAMP_SIZE 72
+ emit_global (acfg, "imt_trampolines_page", TRUE);
+ emit_label (acfg, "imt_trampolines_page");
+ code = buf;
+ /* Need at least two free registers, plus a slot for storing the pc */
+ ARM_PUSH (code, (1 << ARMREG_R0)|(1 << ARMREG_R1)|(1 << ARMREG_R2));
+
+ imm8 = mono_arm_is_rotated_imm8 (mono_pagesize (), &rot_amount);
+ ARM_SUB_REG_IMM (code, ARMREG_IP, ARMREG_IP, imm8, rot_amount);
+ ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, -8);
+
+ /* The IMT method is in v5, r0 has the imt array address */
+
+ loop_start = code;
+ ARM_LDR_IMM (code, ARMREG_R1, ARMREG_R0, 0);
+ ARM_CMP_REG_REG (code, ARMREG_R1, ARMREG_V5);
+ imt_found_check = code;
+ ARM_B_COND (code, ARMCOND_EQ, 0);
+
+ /* End-of-loop check */
+ ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
+ loop_end_check = code;
+ ARM_B_COND (code, ARMCOND_EQ, 0);
+
+ /* Loop footer */
+ ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, sizeof (gpointer) * 2);
+ loop_branch_back = code;
+ ARM_B (code, 0);
+ arm_patch (loop_branch_back, loop_start);
+
+ /* Match */
+ arm_patch (imt_found_check, code);
+ ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
+ ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 0);
+ /* Save it to the third stack slot */
+ ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, 8);
+ /* Restore the registers and branch */
+ ARM_POP (code, (1 << ARMREG_R0)|(1 << ARMREG_R1)|(1 << ARMREG_PC));
+
+ /* No match */
+ arm_patch (loop_end_check, code);
+ ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
+ ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, 8);
+ ARM_POP (code, (1 << ARMREG_R0)|(1 << ARMREG_R1)|(1 << ARMREG_PC));
+ ARM_NOP (code);
+
+ /* Emit it */
+ g_assert (code - buf == IMT_TRAMP_SIZE);
+ emit_bytes (acfg, buf, code - buf);
+
+ for (i = 0; i < count; ++i) {
+ code = buf;
+ ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
+ ARM_B (code, 0);
+ arm_patch (code - 4, code - IMT_TRAMP_SIZE - 8 * (i + 1));
+ g_assert (code - buf == 8);
+ emit_bytes (acfg, buf, code - buf);
+ }
+#endif
+}
+
/*
* arch_emit_specific_trampoline:
*
* method.
*/
for (tramp_type = 0; tramp_type < MONO_TRAMPOLINE_NUM; ++tramp_type) {
- mono_arch_create_generic_trampoline (tramp_type, &info, TRUE);
+ /* we overload the boolean here to indicate the slightly different trampoline needed, see mono_arch_create_generic_trampoline() */
+ mono_arch_create_generic_trampoline (tramp_type, &info, acfg->aot_opts.use_trampolines_page? 2: TRUE);
emit_trampoline (acfg, acfg->got_offset, info);
}
emit_label (acfg, end_symbol);
}
+ arch_emit_specific_trampoline_pages (acfg);
+
/* Reserve some entries at the end of the GOT for our use */
acfg->num_trampoline_got_entries = tramp_got_offset - acfg->got_offset;
}
opts->asm_writer = TRUE;
} else if (str_begins_with (arg, "nodebug")) {
opts->nodebug = TRUE;
+ } else if (str_begins_with (arg, "nopagetrampolines")) {
+ opts->use_trampolines_page = FALSE;
} else if (str_begins_with (arg, "ntrampolines=")) {
opts->ntrampolines = atoi (arg + strlen ("ntrampolines="));
} else if (str_begins_with (arg, "nrgctx-trampolines=")) {
}
}
+ if (opts->use_trampolines_page) {
+ opts->ntrampolines = 0;
+ opts->nrgctx_trampolines = 0;
+ opts->nimt_trampolines = 0;
+ }
g_strfreev (args);
}
acfg->aot_opts.nrgctx_trampolines = 1024;
acfg->aot_opts.nimt_trampolines = 128;
acfg->aot_opts.llvm_path = g_strdup ("");
+#if MONOTOUCH
+ acfg->aot_opts.use_trampolines_page = TRUE;
+#endif
mono_aot_parse_options (aot_options, &acfg->aot_opts);
/* The first unused trampoline of each kind */
guint32 trampoline_index [MONO_AOT_TRAMP_NUM];
+ gboolean use_page_trampolines;
+
MonoAotFileInfo info;
gpointer *globals;
MonoDl *sofile;
} MonoAotModule;
+typedef struct {
+ void *next;
+ unsigned char *trampolines;
+ unsigned char *trampolines_end;
+} TrampolinePage;
+
static GHashTable *aot_modules;
#define mono_aot_lock() EnterCriticalSection (&aot_mutex)
#define mono_aot_unlock() LeaveCriticalSection (&aot_mutex)
static GHashTable *aot_jit_icall_hash;
+#ifdef MONOTOUCH
+#define USE_PAGE_TRAMPOLINES ((MonoAotModule*)mono_defaults.corlib->aot_module)->use_page_trampolines
+#else
+#define USE_PAGE_TRAMPOLINES 0
+#endif
+
+#define mono_aot_page_lock() EnterCriticalSection (&aot_page_mutex)
+#define mono_aot_page_unlock() LeaveCriticalSection (&aot_page_mutex)
+static CRITICAL_SECTION aot_page_mutex;
+
static void
init_plt (MonoAotModule *info);
assembly->image->aot_module = amodule;
if (mono_aot_only) {
+ char *code;
+ find_symbol (amodule->sofile, amodule->globals, "specific_trampolines_page", (gpointer *)&code);
+ amodule->use_page_trampolines = code != NULL;
+ /*g_warning ("using page trampolines: %d", amodule->use_page_trampolines);*/
if (mono_defaults.corlib) {
/* The second got slot contains the mscorlib got addr */
MonoAotModule *mscorlib_amodule = mono_defaults.corlib->aot_module;
mono_aot_init (void)
{
InitializeCriticalSection (&aot_mutex);
+ InitializeCriticalSection (&aot_page_mutex);
aot_modules = g_hash_table_new (NULL, NULL);
mono_install_assembly_load_hook (load_aot_module, NULL);
return mono_create_ftnptr_malloc (load_function (amodule, name));
}
+#ifdef MONOTOUCH
+#include <mach/mach.h>
+
+static TrampolinePage* trampoline_pages [MONO_AOT_TRAMP_NUM];
+/* these sizes are for ARM code, parametrize if porting to other architectures (see arch_emit_specific_trampoline_pages)
+ * trampoline size is assumed to be 8 bytes below as well (8 is the minimum for 32 bit archs, since we need to store
+ * two pointers for trampoline in the data page).
+ * the minimum for the common code must be at least sizeof(TrampolinePage), since we store the page info at the
+ * beginning of the data page.
+ */
+static const int trampolines_pages_code_offsets [MONO_AOT_TRAMP_NUM] = {16, 16, 72};
+
+static unsigned char*
+get_new_trampoline_from_page (int tramp_type)
+{
+ MonoAotModule *amodule;
+ MonoImage *image;
+ TrampolinePage *page;
+ int count;
+ void *tpage;
+ vm_address_t addr, taddr;
+ kern_return_t ret;
+ vm_prot_t prot, max_prot;
+ int psize;
+ unsigned char *code;
+
+ mono_aot_page_lock ();
+ page = trampoline_pages [tramp_type];
+ if (page && page->trampolines < page->trampolines_end) {
+ code = page->trampolines;
+ page->trampolines += 8;
+ mono_aot_page_unlock ();
+ return code;
+ }
+ mono_aot_page_unlock ();
+ psize = mono_pagesize ();
+ /* the trampoline template page is in the mscorlib module */
+ image = mono_defaults.corlib;
+ g_assert (image);
+
+ amodule = image->aot_module;
+ g_assert (amodule);
+
+ if (tramp_type == MONO_AOT_TRAMP_SPECIFIC)
+ tpage = load_function (amodule, "specific_trampolines_page");
+ else if (tramp_type == MONO_AOT_TRAMP_STATIC_RGCTX)
+ tpage = load_function (amodule, "rgctx_trampolines_page");
+ else if (tramp_type == MONO_AOT_TRAMP_IMT_THUNK)
+ tpage = load_function (amodule, "imt_trampolines_page");
+ else
+ g_error ("Incorrect tramp type for trampolines page");
+ g_assert (tpage);
+ /*g_warning ("loaded trampolines page at %x", tpage);*/
+
+ /* avoid the unlikely case of looping forever */
+ count = 40;
+ page = NULL;
+ while (page == NULL && count-- > 0) {
+ addr = 0;
+ /* allocate two contiguous pages of memory: the first page will contain the data (like a local constant pool)
+ * while the second will contain the trampolines.
+ */
+ ret = vm_allocate (mach_task_self (), &addr, psize * 2, VM_FLAGS_ANYWHERE);
+ if (ret != KERN_SUCCESS) {
+ g_error ("Cannot allocate memory for trampolines: %d", ret);
+ break;
+ }
+ /*g_warning ("allocated trampoline double page at %x", addr);*/
+ /* replace the second page with a remapped trampoline page */
+ taddr = addr + psize;
+ vm_deallocate (mach_task_self (), taddr, psize);
+ ret = vm_remap (mach_task_self (), &taddr, psize, 0, FALSE, mach_task_self(), (vm_address_t)tpage, FALSE, &prot, &max_prot, VM_INHERIT_SHARE);
+ if (ret != KERN_SUCCESS) {
+ /* someone else got the page, try again */
+ vm_deallocate (mach_task_self (), addr, psize);
+ continue;
+ }
+ /*g_warning ("remapped trampoline page at %x", taddr);*/
+
+ mono_aot_page_lock ();
+ page = trampoline_pages [tramp_type];
+ /* some other thread already allocated, so use that to avoid wasting memory */
+ if (page && page->trampolines < page->trampolines_end) {
+ code = page->trampolines;
+ page->trampolines += 8;
+ mono_aot_page_unlock ();
+ vm_deallocate (mach_task_self (), addr, psize);
+ vm_deallocate (mach_task_self (), taddr, psize);
+ return code;
+ }
+ page = (TrampolinePage*)addr;
+ page->next = trampoline_pages [tramp_type];
+ trampoline_pages [tramp_type] = page;
+ page->trampolines = (void*)(taddr + trampolines_pages_code_offsets [tramp_type]);
+ page->trampolines_end = (void*)(taddr + psize);
+ code = page->trampolines;
+ page->trampolines += 8;
+ mono_aot_page_unlock ();
+ return code;
+ }
+ g_error ("Cannot allocate more trampoline pages: %d", ret);
+ return NULL;
+}
+
+#else
+static unsigned char*
+get_new_trampoline_from_page (int tramp_type)
+{
+ g_error ("Page trampolines not supported.");
+ return NULL;
+}
+#endif
+
+
+static gpointer
+get_new_specific_trampoline_from_page (gpointer tramp, gpointer arg)
+{
+ void *code;
+ gpointer *data;
+
+ code = get_new_trampoline_from_page (MONO_AOT_TRAMP_SPECIFIC);
+
+ data = (gpointer*)((char*)code - mono_pagesize ());
+ data [0] = arg;
+ data [1] = tramp;
+ /*g_warning ("new trampoline at %p for data %p, tramp %p (stored at %p)", code, arg, tramp, data);*/
+ return code;
+
+}
+
+static gpointer
+get_new_rgctx_trampoline_from_page (gpointer tramp, gpointer arg)
+{
+ void *code;
+ gpointer *data;
+
+ code = get_new_trampoline_from_page (MONO_AOT_TRAMP_STATIC_RGCTX);
+
+ data = (gpointer*)((char*)code - mono_pagesize ());
+ data [0] = arg;
+ data [1] = tramp;
+ /*g_warning ("new rgctx trampoline at %p for data %p, tramp %p (stored at %p)", code, arg, tramp, data);*/
+ return code;
+
+}
+
+static gpointer
+get_new_imt_trampoline_from_page (gpointer arg)
+{
+ void *code;
+ gpointer *data;
+
+ code = get_new_trampoline_from_page (MONO_AOT_TRAMP_IMT_THUNK);
+
+ data = (gpointer*)((char*)code - mono_pagesize ());
+ data [0] = arg;
+ /*g_warning ("new imt trampoline at %p for data %p, (stored at %p)", code, arg, data);*/
+ return code;
+
+}
+
/* Return a given kind of trampoline */
static gpointer
get_numerous_trampoline (MonoAotTrampoline tramp_type, int n_got_slots, MonoAotModule **out_amodule, guint32 *got_offset, guint32 *out_tramp_size)
tramp = generic_trampolines [tramp_type];
g_assert (tramp);
- code = get_numerous_trampoline (MONO_AOT_TRAMP_SPECIFIC, 2, &amodule, &got_offset, &tramp_size);
+ if (USE_PAGE_TRAMPOLINES) {
+ code = get_new_specific_trampoline_from_page (tramp, arg1);
+ tramp_size = 8;
+ } else {
+ code = get_numerous_trampoline (MONO_AOT_TRAMP_SPECIFIC, 2, &amodule, &got_offset, &tramp_size);
- amodule->got [got_offset] = tramp;
- amodule->got [got_offset + 1] = arg1;
+ amodule->got [got_offset] = tramp;
+ amodule->got [got_offset + 1] = arg1;
+ }
if (code_len)
*code_len = tramp_size;
guint8 *code;
guint32 got_offset;
- code = get_numerous_trampoline (MONO_AOT_TRAMP_STATIC_RGCTX, 2, &amodule, &got_offset, NULL);
+ if (USE_PAGE_TRAMPOLINES) {
+ code = get_new_rgctx_trampoline_from_page (addr, ctx);
+ } else {
+ code = get_numerous_trampoline (MONO_AOT_TRAMP_STATIC_RGCTX, 2, &amodule, &got_offset, NULL);
- amodule->got [got_offset] = ctx;
- amodule->got [got_offset + 1] = addr;
+ amodule->got [got_offset] = ctx;
+ amodule->got [got_offset + 1] = addr;
+ }
/* The caller expects an ftnptr */
return mono_create_ftnptr (mono_domain_get (), code);
int i, index, real_count;
MonoAotModule *amodule;
- code = get_numerous_trampoline (MONO_AOT_TRAMP_IMT_THUNK, 1, &amodule, &got_offset, NULL);
-
real_count = 0;
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
buf [(index * 2)] = NULL;
buf [(index * 2) + 1] = fail_tramp;
- amodule->got [got_offset] = buf;
+ if (USE_PAGE_TRAMPOLINES) {
+ code = get_new_imt_trampoline_from_page (buf);
+ } else {
+ code = get_numerous_trampoline (MONO_AOT_TRAMP_IMT_THUNK, 1, &amodule, &got_offset, NULL);
+
+ amodule->got [got_offset] = buf;
+ }
return code;
}
* done:
*/
- if (value != AMD64_RDX)
- amd64_mov_reg_reg (code, AMD64_RDX, value, 8);
- amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, nursery_shift);
- if (shifted_nursery_start >> 31) {
- /*
- * The value we need to compare against is 64 bits, so we need
- * another spare register. We use RBX, which we save and
- * restore.
- */
- amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RBX, 8);
- amd64_mov_reg_imm (code, AMD64_RBX, shifted_nursery_start);
- amd64_alu_reg_reg (code, X86_CMP, AMD64_RDX, AMD64_RBX);
- amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RSP, -8, 8);
- } else {
- amd64_alu_reg_imm (code, X86_CMP, AMD64_RDX, shifted_nursery_start);
+ if (mono_gc_card_table_nursery_check ()) {
+ if (value != AMD64_RDX)
+ amd64_mov_reg_reg (code, AMD64_RDX, value, 8);
+ amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, nursery_shift);
+ if (shifted_nursery_start >> 31) {
+ /*
+ * The value we need to compare against is 64 bits, so we need
+ * another spare register. We use RBX, which we save and
+ * restore.
+ */
+ amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RBX, 8);
+ amd64_mov_reg_imm (code, AMD64_RBX, shifted_nursery_start);
+ amd64_alu_reg_reg (code, X86_CMP, AMD64_RDX, AMD64_RBX);
+ amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RSP, -8, 8);
+ } else {
+ amd64_alu_reg_imm (code, X86_CMP, AMD64_RDX, shifted_nursery_start);
+ }
+ br = code; x86_branch8 (code, X86_CC_NE, -1, FALSE);
}
- br = code; x86_branch8 (code, X86_CC_NE, -1, FALSE);
amd64_mov_reg_reg (code, AMD64_RDX, ptr, 8);
amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, card_table_shift);
if (card_table_mask)
amd64_alu_reg_membase (code, X86_ADD, AMD64_RDX, AMD64_RIP, 0);
amd64_mov_membase_imm (code, AMD64_RDX, 0, 1, 1);
- x86_patch (br, code);
+ if (mono_gc_card_table_nursery_check ())
+ x86_patch (br, code);
break;
}
#ifdef MONO_ARCH_SIMD_INTRINSICS
* before the liveness pass. We emit OP_GC_LIVENESS_DEF instructions for
* them during VZERO decomposition.
*/
- if (!pc_offsets [vmv->vreg])
- pin = TRUE;
+ if (!is_arg) {
+ if (!pc_offsets [vmv->vreg])
+ pin = TRUE;
- if (ins->backend.is_pinvoke)
- pin = TRUE;
+ if (ins->backend.is_pinvoke)
+ pin = TRUE;
+ }
if (bitmap) {
for (cindex = 0; cindex < gcfg->ncallsites; ++cindex) {
* done:
*/
- if (value != X86_EDX)
- x86_mov_reg_reg (code, X86_EDX, value, 4);
- x86_shift_reg_imm (code, X86_SHR, X86_EDX, nursery_shift);
- x86_alu_reg_imm (code, X86_CMP, X86_EDX, nursery_start >> nursery_shift);
- br = code; x86_branch8 (code, X86_CC_NE, -1, FALSE);
+ if (mono_gc_card_table_nursery_check ()) {
+ if (value != X86_EDX)
+ x86_mov_reg_reg (code, X86_EDX, value, 4);
+ x86_shift_reg_imm (code, X86_SHR, X86_EDX, nursery_shift);
+ x86_alu_reg_imm (code, X86_CMP, X86_EDX, nursery_start >> nursery_shift);
+ br = code; x86_branch8 (code, X86_CC_NE, -1, FALSE);
+ }
x86_mov_reg_reg (code, X86_EDX, ptr, 4);
x86_shift_reg_imm (code, X86_SHR, X86_EDX, card_table_shift);
if (card_table_mask)
x86_alu_reg_imm (code, X86_AND, X86_EDX, (int)card_table_mask);
x86_mov_membase_imm (code, X86_EDX, card_table, 1, 1);
- x86_patch (br, code);
+ if (mono_gc_card_table_nursery_check ())
+ x86_patch (br, code);
break;
}
#ifdef MONO_ARCH_SIMD_INTRINSICS
if (aot && tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT) {
/*
+ * For page trampolines the data is in r1, so just move it, otherwise use the got slot as below.
* The trampoline contains a pc-relative offset to the got slot
* preceding the got slot where the value is stored. The offset can be
* found at [lr + 0].
*/
- ARM_LDR_IMM (code, ARMREG_V2, ARMREG_LR, 0);
- ARM_ADD_REG_IMM (code, ARMREG_V2, ARMREG_V2, 4, 0);
- ARM_LDR_REG_REG (code, ARMREG_V2, ARMREG_V2, ARMREG_LR);
+ if (aot == 2) {
+ ARM_MOV_REG_REG (code, ARMREG_V2, ARMREG_R1);
+ } else {
+ ARM_LDR_IMM (code, ARMREG_V2, ARMREG_LR, 0);
+ ARM_ADD_REG_IMM (code, ARMREG_V2, ARMREG_V2, 4, 0);
+ ARM_LDR_REG_REG (code, ARMREG_V2, ARMREG_V2, ARMREG_LR);
+ }
} else {
if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT)
ARM_LDR_IMM (code, ARMREG_V2, ARMREG_LR, 0);
# Disabled until ?mcs is fixed
# bug-331958.cs
BASE_TEST_CS_SRC= \
+ bug-2907.cs \
array-init.cs \
arraylist.cs \
assemblyresolve_event.cs \
--- /dev/null
+using System;
+using System.IO;
+using System.Xml.Serialization;
+
+
+class Program
+{
+ static public T DeserializeFromString<T>(string xml) where T : class
+ {
+
+ if (String.IsNullOrEmpty(xml))
+ {
+ return null;
+ }
+
+ StringReader reader = null;
+ T deserializedObject = null;
+ try
+ {
+ reader = new StringReader(xml);
+ XmlSerializer serializer = new XmlSerializer(typeof(T));
+ deserializedObject = serializer.Deserialize(reader) as T;
+ }
+ finally
+ {
+ if (null != reader)
+ {
+ reader.Close();
+ }
+ }
+ return deserializedObject;
+ }
+
+
+ static void Main(string[] args)
+ {
+ string myXML = @"<?xml version=""1.0"" encoding=""utf-8""?><TASK><OptionA/></TASK>";
+
+ // The following line fails on Mono 2.8 2.10 2.10.8.1 2.10.9
+ TASK data = DeserializeFromString<TASK>(myXML);
+ if(data == null)
+ {
+ throw new Exception("A#01");
+ }
+ if(data.ItemElementName != ItemChoiceType.OptionA)
+ {
+ throw new Exception("A#02");
+ }
+ }
+}
+
+// Below is the code generated from the following XSD:
+/*
+<?xml version="1.0" encoding="UTF-8"?>
+<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified" attributeFormDefault="unqualified">
+ <xs:element name="TASK">
+ <xs:complexType>
+ <xs:choice>
+ <xs:element name="OptionA"/>
+ <xs:element name="OptionB"/>
+ </xs:choice>
+ </xs:complexType>
+ </xs:element>
+</xs:schema>
+*/
+
+//------------------------------------------------------------------------------
+// <auto-generated>
+// This code was generated by a tool.
+// Runtime Version:4.0.30319.239
+//
+// Changes to this file may cause incorrect behavior and will be lost if
+// the code is regenerated.
+// </auto-generated>
+//------------------------------------------------------------------------------
+
+//
+// This source code was auto-generated by xsd, Version=4.0.30319.1.
+//
+
+
+/// <remarks/>
+[System.CodeDom.Compiler.GeneratedCodeAttribute("xsd", "4.0.30319.1")]
+[System.SerializableAttribute()]
+[System.Diagnostics.DebuggerStepThroughAttribute()]
+[System.ComponentModel.DesignerCategoryAttribute("code")]
+[System.Xml.Serialization.XmlTypeAttribute(AnonymousType=true)]
+[System.Xml.Serialization.XmlRootAttribute(Namespace="", IsNullable=false)]
+public partial class TASK {
+
+ private object itemField;
+
+ private ItemChoiceType itemElementNameField;
+
+ /// <remarks/>
+ [System.Xml.Serialization.XmlElementAttribute("OptionA", typeof(object), Order=0)]
+ [System.Xml.Serialization.XmlElementAttribute("OptionB", typeof(object), Order=0)]
+ [System.Xml.Serialization.XmlChoiceIdentifierAttribute("ItemElementName")]
+ public object Item {
+ get {
+ return this.itemField;
+ }
+ set {
+ this.itemField = value;
+ }
+ }
+
+ /// <remarks/>
+ [System.Xml.Serialization.XmlElementAttribute(Order=1)]
+ [System.Xml.Serialization.XmlIgnoreAttribute()]
+ public ItemChoiceType ItemElementName {
+ get {
+ return this.itemElementNameField;
+ }
+ set {
+ this.itemElementNameField = value;
+ }
+ }
+}
+
+/// <remarks/>
+[System.CodeDom.Compiler.GeneratedCodeAttribute("xsd", "4.0.30319.1")]
+[System.SerializableAttribute()]
+[System.Xml.Serialization.XmlTypeAttribute(IncludeInSchema=false)]
+public enum ItemChoiceType {
+
+ /// <remarks/>
+ OptionA,
+
+ /// <remarks/>
+ OptionB,
+}
#define MONO_GC_END_ENABLED() (0)
-#define MONO_GC_LOCKED()
-#define MONO_GC_LOCKED_ENABLED() (0)
+#define MONO_GC_CONCURRENT_START_BEGIN(generation)
+#define MONO_GC_CONCURRENT_START_BEGIN_ENABLED() (0)
-#define MONO_GC_UNLOCKED()
-#define MONO_GC_UNLOCKED_ENABLED() (0)
+#define MONO_GC_CONCURRENT_START_END(generation,num_major_objects_marked)
+#define MONO_GC_CONCURRENT_START_END_ENABLED() (0)
+
+#define MONO_GC_CONCURRENT_UPDATE_FINISH_BEGIN(generation,num_major_objects_marked)
+#define MONO_GC_CONCURRENT_UPDATE_FINISH_BEGIN_ENABLED() (0)
+
+#define MONO_GC_CONCURRENT_UPDATE_END(generation,num_major_objects_marked)
+#define MONO_GC_CONCURRENT_UPDATE_END_ENABLED() (0)
+
+#define MONO_GC_CONCURRENT_FINISH_END(generation,num_major_objects_marked)
+#define MONO_GC_CONCURRENT_FINISH_END_ENABLED() (0)
+
+
+#define MONO_GC_SWEEP_BEGIN(generation,full_sweep)
+#define MONO_GC_SWEEP_BEGIN_ENABLED() (0)
+
+#define MONO_GC_SWEEP_END(generation,full_sweep)
+#define MONO_GC_SWEEP_END_ENABLED() (0)
+
+
+#define MONO_GC_WORLD_STOP_BEGIN()
+#define MONO_GC_WORLD_STOP_BEGIN_ENABLED() (0)
+
+#define MONO_GC_WORLD_STOP_END()
+#define MONO_GC_WORLD_STOP_END_ENABLED() (0)
+
+#define MONO_GC_WORLD_RESTART_BEGIN(generation)
+#define MONO_GC_WORLD_RESTART_BEGIN_ENABLED() (0)
+
+#define MONO_GC_WORLD_RESTART_END(generation)
+#define MONO_GC_WORLD_RESTART_END_ENABLED() (0)
#define MONO_GC_HEAP_ALLOC(addr,size)
+#define MONO_GC_HEAP_ALLOC_ENABLED() (0)
+
#define MONO_GC_HEAP_FREE(addr,size)
+#define MONO_GC_HEAP_FREE_ENABLED() (0)
+
+
+#define MONO_GC_LOCKED()
+#define MONO_GC_LOCKED_ENABLED() (0)
+
+#define MONO_GC_UNLOCKED()
+#define MONO_GC_UNLOCKED_ENABLED() (0)
#define MONO_GC_NURSERY_TLAB_ALLOC(addr,len)
#define MONO_GC_OBJ_PINNED(addr,size,ns_name,class_name,generation)
#define MONO_GC_OBJ_PINNED_ENABLED() (0)
+
+#define MONO_GC_FINALIZE_ENQUEUE(addr,size,ns_name,class_name,generation,is_critical)
+#define MONO_GC_FINALIZE_ENQUEUE_ENABLED() (0)
+
+#define MONO_GC_FINALIZE_INVOKE(addr,size,ns_name,class_name)
+#define MONO_GC_FINALIZE_INVOKE_ENABLED() (0)
+
+
+#define MONO_GC_WEAK_UPDATE(ref_addr,old_addr,new_addr,size,ns_name,class_name,track)
+#define MONO_GC_WEAK_UPDATE_ENABLED() (0)
+
#endif
#endif
if (fread (&type, 1, 1, in) != 1)
return SGEN_PROTOCOL_EOF;
switch (type) {
- case SGEN_PROTOCOL_COLLECTION: size = sizeof (SGenProtocolCollection); break;
+ case SGEN_PROTOCOL_COLLECTION_BEGIN: size = sizeof (SGenProtocolCollection); break;
+ case SGEN_PROTOCOL_COLLECTION_END: size = sizeof (SGenProtocolCollection); break;
case SGEN_PROTOCOL_ALLOC: size = sizeof (SGenProtocolAlloc); break;
case SGEN_PROTOCOL_ALLOC_PINNED: size = sizeof (SGenProtocolAlloc); break;
case SGEN_PROTOCOL_ALLOC_DEGRADED: size = sizeof (SGenProtocolAlloc); break;
print_entry (int type, void *data)
{
switch (type) {
- case SGEN_PROTOCOL_COLLECTION: {
+ case SGEN_PROTOCOL_COLLECTION_BEGIN: {
SGenProtocolCollection *entry = data;
- printf ("collection %d generation %d\n", entry->index, entry->generation);
+ printf ("collection begin %d generation %d\n", entry->index, entry->generation);
+ break;
+ }
+ case SGEN_PROTOCOL_COLLECTION_END: {
+ SGenProtocolCollection *entry = data;
+ printf ("collection end %d generation %d\n", entry->index, entry->generation);
break;
}
case SGEN_PROTOCOL_ALLOC: {
is_match (gpointer ptr, int type, void *data)
{
switch (type) {
- case SGEN_PROTOCOL_COLLECTION:
+ case SGEN_PROTOCOL_COLLECTION_BEGIN:
+ case SGEN_PROTOCOL_COLLECTION_END:
case SGEN_PROTOCOL_THREAD_SUSPEND:
case SGEN_PROTOCOL_THREAD_RESTART:
case SGEN_PROTOCOL_THREAD_REGISTER: